mirror of https://github.com/OpenIPC/firmware.git
diff -urN linux-3.0.101/arch/arm/common/pl330.c linux-3.0.101.xm510/arch/arm/common/pl330.c
--- linux-3.0.101/arch/arm/common/pl330.c	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/common/pl330.c	2016-05-17 09:52:17.000000000 +0300
@@ -241,8 +241,10 @@
 #ifdef PL330_DEBUG_MCGEN
 static unsigned cmd_line;
 #define PL330_DBGCMD_DUMP(off, x...)	do { \
+						printk("\033[1;35m"); \
 						printk("%x:", cmd_line); \
 						printk(x); \
+						printk("\033[m"); \
 						cmd_line += off; \
 					} while (0)
 #define PL330_DBGMC_START(addr)	(cmd_line = addr)
@@ -377,10 +379,10 @@
 	void __iomem *regs = pi->base;
 	u32 id = 0;
 
-	id |= (readb(regs + off + 0x0) << 0);
-	id |= (readb(regs + off + 0x4) << 8);
-	id |= (readb(regs + off + 0x8) << 16);
-	id |= (readb(regs + off + 0xc) << 24);
+	id |= (readl(regs + off + 0x0) << 0);
+	id |= (readl(regs + off + 0x4) << 8);
+	id |= (readl(regs + off + 0x8) << 16);
+	id |= (readl(regs + off + 0xc) << 24);
 
 	return id;
 }
diff -urN linux-3.0.101/arch/arm/configs/xm510_full_defconfig linux-3.0.101.xm510/arch/arm/configs/xm510_full_defconfig
--- linux-3.0.101/arch/arm/configs/xm510_full_defconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/configs/xm510_full_defconfig	2017-09-11 14:47:37.000000000 +0300
@@ -0,0 +1,1202 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 3.0.101 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+# CONFIG_EXPERIMENTAL is not set
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_KERNEL_LZO=y
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
+CONFIG_TASKSTATS=y
+# CONFIG_TASK_DELAY_ACCT is not set
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_SPARSE_IRQ=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+# CONFIG_AIO is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS4 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+CONFIG_ARCH_XM510=y
+# CONFIG_ARCH_XM520 is not set
+
+#
+# xm510 board feature
+#
+CONFIG_MACH_XM510=y
+CONFIG_DEFAULT_BUSCLK=50000000
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+# CONFIG_ARM_ERRATA_775420 is not set
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=128M console=ttyAMA0,115200 console=ttyMTD,blackbox"
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
+# CONFIG_CMDLINE_EXTEND is not set
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+CONFIG_MTD_BLOCK2MTD=y
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_XMSFC=y
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_FTMAC100 is not set
+CONFIG_NETDEV_1000=y
+CONFIG_XMMAC_ETH=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_TRACE_SINK is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_WMT_GE_ROPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_LOGO is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_ARMMMCI is not set
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_ARASAN=y
+CONFIG_MMC_WIFI=m
+CONFIG_MMC_SD=m
+# CONFIG_MMC_DW is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+CONFIG_PL330_DMA=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_HFSPLUS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+# CONFIG_JFFS2_CMODE_PRIORITY is not set
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_PCBC is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_AVERAGE=y
diff -urN linux-3.0.101/arch/arm/configs/xm510_mini_defconfig linux-3.0.101.xm510/arch/arm/configs/xm510_mini_defconfig
--- linux-3.0.101/arch/arm/configs/xm510_mini_defconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/configs/xm510_mini_defconfig	2016-07-14 08:38:29.000000000 +0300
@@ -0,0 +1,1136 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 3.0.101 Kernel Configuration
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_HAVE_SCHED_CLOCK=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_HAVE_IRQ_WORK=y
+
+#
+# General setup
+#
+# CONFIG_EXPERIMENTAL is not set
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_KERNEL_LZO=y
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
+CONFIG_TASKSTATS=y
+# CONFIG_TASK_DELAY_ACCT is not set
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_SPARSE_IRQ=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+# CONFIG_AIO is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_MXS is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_EXYNOS4 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_PLAT_SPEAR is not set
+# CONFIG_ARCH_VT8500 is not set
+CONFIG_ARCH_XM510=y
+# CONFIG_ARCH_XM520 is not set
+
+#
+# xm510 board feature
+#
+CONFIG_MACH_XM510=y
+CONFIG_DEFAULT_BUSCLK=50000000
+
+#
+# System MMU
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+# CONFIG_ARM_ERRATA_754322 is not set
+# CONFIG_ARM_ERRATA_775420 is not set
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_HAVE_ARCH_PFN_VALID=y
+# CONFIG_HIGHMEM is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+# CONFIG_CLEANCACHE is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+# CONFIG_USE_OF is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=128M console=ttyAMA0,115200 console=ttyMTD,blackbox"
+CONFIG_CMDLINE_FROM_BOOTLOADER=y
+# CONFIG_CMDLINE_EXTEND is not set
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
|
||
+
|
||
+#
|
||
+# Networking options
|
||
+#
|
||
+CONFIG_PACKET=y
|
||
+CONFIG_UNIX=y
|
||
+CONFIG_XFRM=y
|
||
+# CONFIG_XFRM_USER is not set
|
||
+# CONFIG_NET_KEY is not set
|
||
+CONFIG_INET=y
|
||
+CONFIG_IP_MULTICAST=y
|
||
+# CONFIG_IP_ADVANCED_ROUTER is not set
|
||
+# CONFIG_IP_PNP is not set
|
||
+# CONFIG_NET_IPIP is not set
|
||
+# CONFIG_NET_IPGRE_DEMUX is not set
|
||
+# CONFIG_IP_MROUTE is not set
|
||
+# CONFIG_ARPD is not set
|
||
+# CONFIG_SYN_COOKIES is not set
|
||
+# CONFIG_INET_AH is not set
|
||
+# CONFIG_INET_ESP is not set
|
||
+# CONFIG_INET_IPCOMP is not set
|
||
+# CONFIG_INET_XFRM_TUNNEL is not set
|
||
+# CONFIG_INET_TUNNEL is not set
|
||
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
|
||
+CONFIG_INET_XFRM_MODE_TUNNEL=y
|
||
+CONFIG_INET_XFRM_MODE_BEET=y
|
||
+CONFIG_INET_LRO=y
|
||
+CONFIG_INET_DIAG=y
|
||
+CONFIG_INET_TCP_DIAG=y
|
||
+# CONFIG_TCP_CONG_ADVANCED is not set
|
||
+CONFIG_TCP_CONG_CUBIC=y
|
||
+CONFIG_DEFAULT_TCP_CONG="cubic"
|
||
+# CONFIG_IPV6 is not set
|
||
+# CONFIG_NETWORK_SECMARK is not set
|
||
+# CONFIG_NETFILTER is not set
|
||
+# CONFIG_ATM is not set
|
||
+# CONFIG_L2TP is not set
|
||
+# CONFIG_BRIDGE is not set
|
||
+# CONFIG_VLAN_8021Q is not set
|
||
+# CONFIG_DECNET is not set
|
||
+# CONFIG_LLC2 is not set
|
||
+# CONFIG_IPX is not set
|
||
+# CONFIG_ATALK is not set
|
||
+# CONFIG_PHONET is not set
|
||
+# CONFIG_NET_SCHED is not set
|
||
+# CONFIG_DCB is not set
|
||
+CONFIG_DNS_RESOLVER=y
|
||
+# CONFIG_BATMAN_ADV is not set
|
||
+
|
||
+#
|
||
+# Network testing
|
||
+#
|
||
+# CONFIG_NET_PKTGEN is not set
|
||
+# CONFIG_HAMRADIO is not set
|
||
+# CONFIG_CAN is not set
|
||
+# CONFIG_IRDA is not set
|
||
+# CONFIG_BT is not set
|
||
+# CONFIG_WIRELESS is not set
|
||
+# CONFIG_WIMAX is not set
|
||
+# CONFIG_RFKILL is not set
|
||
+# CONFIG_NET_9P is not set
|
||
+# CONFIG_CAIF is not set
|
||
+
|
||
+#
|
||
+# Device Drivers
|
||
+#
|
||
+
|
||
+#
|
||
+# Generic Driver Options
|
||
+#
|
||
+CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
|
||
+# CONFIG_DEVTMPFS is not set
|
||
+CONFIG_STANDALONE=y
|
||
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||
+CONFIG_FW_LOADER=y
|
||
+# CONFIG_FIRMWARE_IN_KERNEL is not set
|
||
+CONFIG_EXTRA_FIRMWARE=""
|
||
+# CONFIG_SYS_HYPERVISOR is not set
|
||
+# CONFIG_CONNECTOR is not set
|
||
+CONFIG_MTD=y
|
||
+# CONFIG_MTD_DEBUG is not set
|
||
+# CONFIG_MTD_TESTS is not set
|
||
+# CONFIG_MTD_REDBOOT_PARTS is not set
|
||
+CONFIG_MTD_CMDLINE_PARTS=y
|
||
+# CONFIG_MTD_AFS_PARTS is not set
|
||
+# CONFIG_MTD_AR7_PARTS is not set
|
||
+
|
||
+#
|
||
+# User Modules And Translation Layers
|
||
+#
|
||
+CONFIG_MTD_CHAR=y
|
||
+CONFIG_MTD_BLKDEVS=y
|
||
+CONFIG_MTD_BLOCK=y
|
||
+# CONFIG_FTL is not set
|
||
+# CONFIG_NFTL is not set
|
||
+# CONFIG_INFTL is not set
|
||
+# CONFIG_RFD_FTL is not set
|
||
+# CONFIG_SSFDC is not set
|
||
+# CONFIG_MTD_OOPS is not set
|
||
+
|
||
+#
|
||
+# RAM/ROM/Flash chip drivers
|
||
+#
|
||
+# CONFIG_MTD_CFI is not set
|
||
+# CONFIG_MTD_JEDECPROBE is not set
|
||
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
|
||
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
|
||
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
|
||
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
|
||
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
|
||
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
|
||
+CONFIG_MTD_CFI_I1=y
|
||
+CONFIG_MTD_CFI_I2=y
|
||
+# CONFIG_MTD_CFI_I4 is not set
|
||
+# CONFIG_MTD_CFI_I8 is not set
|
||
+# CONFIG_MTD_RAM is not set
|
||
+# CONFIG_MTD_ROM is not set
|
||
+# CONFIG_MTD_ABSENT is not set
|
||
+
|
||
+#
|
||
+# Mapping drivers for chip access
|
||
+#
|
||
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+CONFIG_MTD_BLOCK2MTD=y
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_XMSFC=y
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_FTMAC100 is not set
+CONFIG_NETDEV_1000=y
+CONFIG_XMMAC_ETH=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+CONFIG_PPP=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_TRACE_SINK is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_RAMOOPS is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+
+#
+# Enable Device Drivers -> PPS to see the PTP clock options.
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_WMT_GE_ROPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_LOGO is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_NFC_DEVICES is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_CLKDEV_LOOKUP=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_HFSPLUS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+# CONFIG_JFFS2_CMODE_PRIORITY is not set
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_PCBC is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_AVERAGE=y
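Once the tree is patched, this configuration is selected through the kernel's standard defconfig mechanism, e.g. "make ARCH=arm CROSS_COMPILE=arm-linux- xm510_full_defconfig" followed by "make zImage"; the cross-compiler prefix shown is illustrative, any ARM-targeted toolchain prefix works.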
diff -urN linux-3.0.101/arch/arm/include/asm/hardware/debug-pl01x.S linux-3.0.101.xm510/arch/arm/include/asm/hardware/debug-pl01x.S
--- linux-3.0.101/arch/arm/include/asm/hardware/debug-pl01x.S	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/include/asm/hardware/debug-pl01x.S	2016-05-17 09:52:17.000000000 +0300
@@ -13,7 +13,7 @@
 #include <linux/amba/serial.h>

 	.macro	senduart,rd,rx
-	strb	\rd, [\rx, #UART01x_DR]
+	str	\rd, [\rx, #UART01x_DR]
 	.endm

 	.macro	waituart,rd,rx
diff -urN linux-3.0.101/arch/arm/include/asm/setup.h linux-3.0.101.xm510/arch/arm/include/asm/setup.h
--- linux-3.0.101/arch/arm/include/asm/setup.h	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/include/asm/setup.h	2016-05-17 09:52:17.000000000 +0300
@@ -126,6 +126,17 @@
 	char	cmdline[1];	/* this is the minimum size */
 };

+#define ATAG_XMINFO 0x54410010
+struct tag_xminfo
+{
+	u8 xmauto;
+	u8 xmuart;
+	u8 ethaddr[18];
+	u8 p_id[32];
+	u8 hwid[32];
+};
+
+
 /* acorn RiscPC specific information */
 #define ATAG_ACORN	0x41000101

@@ -156,6 +167,8 @@
 	struct tag_videolfb	videolfb;
 	struct tag_cmdline	cmdline;

+	struct tag_xminfo	xminfo;
+
 	/*
 	 * Acorn specific
 	 */
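The new ATAG_XMINFO tag follows the usual ARM boot-tag pattern: the bootloader appends a struct tag whose header carries the tag magic and its size in 32-bit words, and the kernel consumes it through the __tagtable() entry added to arch/arm/kernel/setup.c below. A minimal bootloader-side sketch, assuming the standard tag_size()/tag_next() helpers from this header; setup_xminfo_tag() and the literal field values are illustrative, not part of this patch:

	/* Hypothetical bootloader-side helper; strcpy() from the C library
	 * or linux/string.h, struct tag helpers from asm/setup.h. */
	static struct tag *setup_xminfo_tag(struct tag *params)
	{
		params->hdr.tag = ATAG_XMINFO;
		params->hdr.size = tag_size(tag_xminfo);	/* size in words */

		params->u.xminfo.xmauto = 1;			/* example values */
		params->u.xminfo.xmuart = 0;
		strcpy((char *)params->u.xminfo.ethaddr, "00:11:22:33:44:55");
		strcpy((char *)params->u.xminfo.p_id, "xm510-demo");
		strcpy((char *)params->u.xminfo.hwid, "hw-rev-a");

		return tag_next(params);	/* advance to the next tag slot */
	}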
diff -urN linux-3.0.101/arch/arm/Kconfig linux-3.0.101.xm510/arch/arm/Kconfig
--- linux-3.0.101/arch/arm/Kconfig	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -879,8 +879,23 @@
 	select HAVE_PWM
 	help
 	  Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
+
+config ARCH_XM510
+	bool "XM 510"
+	select ARM_AMBA
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select ARM_VIC
+	select HAVE_SCHED_CLOCK
+	select GENERIC_TIME
+	select GENERIC_CLOCKEVENTS
+	select ARCH_HAS_CPUFREQ
+	help
+	  This enables support for the XM xm510 platform.
+
 endchoice

+
 #
 # This is sorted alphabetically by mach-* pathname. However, plat-*
 # Kconfigs may be included either alphabetically (according to the
@@ -903,6 +918,9 @@
 source "arch/arm/mach-footbridge/Kconfig"

 source "arch/arm/mach-gemini/Kconfig"
+if ARCH_XM510
+source "arch/arm/mach-xm510/Kconfig"
+endif

 source "arch/arm/mach-h720x/Kconfig"

diff -urN linux-3.0.101/arch/arm/kernel/setup.c linux-3.0.101.xm510/arch/arm/kernel/setup.c
--- linux-3.0.101/arch/arm/kernel/setup.c	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/kernel/setup.c	2016-05-17 09:52:17.000000000 +0300
@@ -115,6 +115,10 @@
 EXPORT_SYMBOL(outer_cache);
 #endif

+
+struct tag_xminfo xminfo;
+EXPORT_SYMBOL(xminfo);
+
 struct stack {
 	u32 irq[3];
 	u32 abt[3];
@@ -677,6 +681,23 @@

 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);

+
+static int __init parse_tag_xminfo(const struct tag *tag)
+{
+	memset(&xminfo, 0, sizeof(xminfo));
+	xminfo.xmauto = tag->u.xminfo.xmauto;
+	xminfo.xmuart = tag->u.xminfo.xmuart;
+	strcpy(xminfo.ethaddr, tag->u.xminfo.ethaddr);
+	strcpy(xminfo.p_id, tag->u.xminfo.p_id);
+	strcpy(xminfo.hwid, tag->u.xminfo.hwid);
+
+
+	return 0;
+}
+__tagtable(ATAG_XMINFO, parse_tag_xminfo);
+
+
+
 /*
  * Scan the tag table for this tag, and call its parse function.
  * The tag table is built by the linker from all the __tagtable
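Because the tag is parsed during setup_arch(), before any initcall runs, the exported xminfo is already populated when built-in drivers come up. A hypothetical consumer that turns the ethaddr string into a binary MAC address; demo_use_xminfo() is illustrative, and it assumes mac_pton(), the kernel's MAC-string parser, is available in this kernel generation:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/setup.h>

	extern struct tag_xminfo xminfo;

	static int __init demo_use_xminfo(void)
	{
		u8 mac[6];

		/* ethaddr is expected to hold an "aa:bb:cc:dd:ee:ff" string */
		if (mac_pton((const char *)xminfo.ethaddr, mac))
			pr_info("xm510: boot MAC %pM\n", mac);
		return 0;
	}
	late_initcall(demo_use_xminfo);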
diff -urN linux-3.0.101/arch/arm/mach-xm510/clock.c linux-3.0.101.xm510/arch/arm/mach-xm510/clock.c
--- linux-3.0.101/arch/arm/mach-xm510/clock.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/clock.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,34 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+
+#include <asm/clkdev.h>
+#include <linux/io.h>
+#include <mach/io.h>
+#include <mach/platform.h>
+#include <mach/early-debug.h>
+#include "clock.h"
+
+int clk_enable(struct clk *clk)
+{
+	return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
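These stubs are enough for drivers that only need a fixed rate, such as the PL011 UART, which calls clk_get()/clk_enable()/clk_get_rate() to derive its baud divisors; the actual struct clk instances are registered per device id via clkdev_add_table() in core.c and dma.c. A sketch of that lookup path (uart_probe_rate() is illustrative, not part of the patch):

	#include <linux/clk.h>
	#include <linux/err.h>

	static unsigned long uart_probe_rate(struct device *dev)
	{
		struct clk *clk = clk_get(dev, NULL);	/* matched by dev_id, e.g. "uart:0" */
		unsigned long rate = 0;

		if (!IS_ERR(clk)) {
			clk_enable(clk);		/* a no-op on this platform */
			rate = clk_get_rate(clk);	/* returns clk->rate, 24 MHz here */
			clk_disable(clk);
			clk_put(clk);
		}
		return rate;
	}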
diff -urN linux-3.0.101/arch/arm/mach-xm510/clock.h linux-3.0.101.xm510/arch/arm/mach-xm510/clock.h
--- linux-3.0.101/arch/arm/mach-xm510/clock.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/clock.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,12 @@
+#ifndef __HISI_CLOCK_H__
+#define __HISI_CLOCK_H__
+
+#define OSC_FREQ 24000000
+struct clk {
+	unsigned long rate;
+};
+
+unsigned long get_ahbclk_hw(void);
+
+#endif
+
diff -urN linux-3.0.101/arch/arm/mach-xm510/core.c linux-3.0.101.xm510/arch/arm/mach-xm510/core.c
--- linux-3.0.101/arch/arm/mach-xm510/core.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/core.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,414 @@
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cnt32_to_63.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/sched_clock.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <linux/leds.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/gic.h>
+#include <asm/hardware/vic.h>
+#include <asm/mach-types.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+
+#include <mach/time.h>
+#include <mach/hardware.h>
+#include <mach/early-debug.h>
+#include <mach/irqs.h>
+#include <mach/dma.h>
+#include <linux/bootmem.h>
+#include <linux/amba/serial.h>
+#include <linux/amba/pl330.h>
+#include "clock.h"
+
+#define ioaddr_intc(x)		IO_ADDRESS(INTC_BASE + (x))
+#define GPIO0_MULT_USE_EN	IO_ADDRESS(GPIO_BASE)
+
+
+#define do_level_IRQ	handle_level_irq
+static void xm_ack_irq(struct irq_data *data)
+{
+	unsigned long irq = data->irq;
+	irq -= INTNR_IRQ_START;
+	writel(1<<irq, ioaddr_intc(REG_INTC_INTENCLEAR));
+}
+
+static void xm_mask_irq(struct irq_data *data)
+{
+	unsigned long irq = data->irq;
+	irq -= INTNR_IRQ_START;
+	writel(1<<irq, ioaddr_intc(REG_INTC_INTENCLEAR));
+}
+
+static void xm_unmask_irq(struct irq_data *data)
+{
+	unsigned long irq = data->irq;
+	irq -= INTNR_IRQ_START;
+	writel(1<<irq, ioaddr_intc(REG_INTC_INTENABLE));
+}
+
+static struct irq_chip xm_chip = {
+	.irq_ack	= xm_ack_irq,
+	.irq_mask	= xm_mask_irq,
+	.irq_unmask	= xm_unmask_irq,
+	.irq_disable	= xm_mask_irq,
+};
+
+static struct map_desc xm510_io_desc[] __initdata = {
+	{
+		.virtual	= XM510_IOCH1_VIRT,
+		.pfn		= __phys_to_pfn(XM510_IOCH1_PHYS),
+		.length		= XM510_IOCH1_SIZE,
+		.type		= MT_DEVICE
+	},
+	{
+		.virtual	= XM510_IOCH2_VIRT,
+		.pfn		= __phys_to_pfn(XM510_IOCH2_PHYS),
+		.length		= XM510_IOCH2_SIZE,
+		.type		= MT_DEVICE
+	}
+};
+
+static unsigned long xm510_timer_reload, timer0_clk_hz, timer1_clk_hz,
+	timer0_clk_khz, timer1_clk_khz;
+
+
+static void early_init(void)
+{
+	unsigned long timerclk;
+
+	edb_trace();
+	timerclk = CFG_TIMER_CLK;
+	printk(KERN_INFO "timer clock %ld.\n", timerclk);
+
+	xm510_timer_reload = timerclk / HZ;
+	timer0_clk_hz = timerclk;
+	timer0_clk_khz = timerclk / 1000;
+	timer1_clk_hz = timerclk;
+	timer1_clk_khz = timerclk / 1000;
+
+}
+
+void __init xm510_map_io(void)
+{
+	int i;
+
+	iotable_init(xm510_io_desc, ARRAY_SIZE(xm510_io_desc));
+
+	for (i = 0; i < ARRAY_SIZE(xm510_io_desc); i++) {
+		edb_putstr(" V: "); edb_puthex(xm510_io_desc[i].virtual);
+		edb_putstr(" P: "); edb_puthex(xm510_io_desc[i].pfn);
+		edb_putstr(" S: "); edb_puthex(xm510_io_desc[i].length);
+		edb_putstr(" T: "); edb_putul(xm510_io_desc[i].type);
+		edb_putstr("\n");
+	}
+
+	early_init();
+
+	edb_trace();
+}
+
+void __init xm510_init_irq(void)
+{
+	unsigned int i;
+
+	edb_trace();
+
+	writel(~0, ioaddr_intc(REG_INTC_INTENCLEAR));
+	writel(0, ioaddr_intc(REG_INTC_INTSELECT));
+	writel(~0, ioaddr_intc(REG_INTC_SOFTINTCLEAR));
+
+	for (i = INTNR_IRQ_START; i <= INTNR_IRQ_END; i++) {
+		irq_set_chip(i, &xm_chip);
+		irq_set_handler(i, do_level_IRQ);
+		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+	}
+}
+
+static unsigned long long xm510_cycles_2_ns(unsigned long long cyc)
+{
+	unsigned long cyc2ns_scale = (1000000 << 10)/timer1_clk_khz;
+	return (cyc * cyc2ns_scale) >> 10;
+}
+
+static unsigned long free_timer_overflows;
+
+unsigned long long sched_clock(void)
+{
+	unsigned long long ticks64;
+	unsigned long ticks2, ticks1;
+	ticks2 = 0UL - (unsigned long)readl(CFG_TIMER01_VABASE
+			+ REG_TIMER1_VALUE);
+	do {
+		ticks1 = ticks2;
+		ticks64 = free_timer_overflows;
+		ticks2 = 0UL - (unsigned long)readl(CFG_TIMER01_VABASE
+				+ REG_TIMER1_VALUE);
+	} while (ticks1 > ticks2);
+
+	/*
+	 * If the interrupt flag is still set, we were called with IRQs off:
+	 * the counter wrapped but 'free_timer_overflows' was not updated yet.
+	 */
+	if (readl(CFG_TIMER01_VABASE + REG_TIMER1_MIS)) {
+		ticks64 += 1;
+		ticks2 = 0UL - (unsigned long)readl(CFG_TIMER01_VABASE
+				+ REG_TIMER1_VALUE);
+	}
+
+	//printk(KERN_NOTICE "\033[1;35m REG=%#x, ticks2=%#x \n\033[m",(0UL - (unsigned long)readl(CFG_TIMER01_VABASE + REG_TIMER1_VALUE)), ticks2);
+	//printk(KERN_NOTICE"\033[1;35m free_timer=%#x, ticks64=%#x,ticks2=%ld,ns=%lld\n\033[m",free_timer_overflows, ticks64, ticks2, xm510_cycles_2_ns((ticks64 << 32) | ticks2));
+	return xm510_cycles_2_ns((ticks64 << 32) | ticks2);
+}
+
+static DEFINE_CLOCK_DATA(cd);
+static void *ctr;
+static void xm510_update_sched_clock(void)
+{
+	u32 cyc = readl(ctr);
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+static struct clocksource xm510_clocksource;
+
+static void __init xm510_sched_clock_init(void* reg, unsigned long rate)
+{
+	ctr = reg;
+	init_fixed_sched_clock(&cd, xm510_update_sched_clock,
+			       32, rate, xm510_clocksource.mult,
+			       xm510_clocksource.shift);
+}
+
+
+static void timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		writel(0, CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+		writel(xm510_timer_reload, CFG_TIMER01_VABASE
+				+ REG_TIMER_RELOAD);
+		writel(CFG_TIMER_CONTROL, CFG_TIMER01_VABASE
+				+ REG_TIMER_CONTROL);
+		edb_trace();
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		writel((CFG_TIMER_32BIT | CFG_TIMER_ONESHOT),
+				CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = readl(CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+		ctrl &= ~CFG_TIMER_ENABLE;
+		writel(ctrl, CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+	}
+}
+
+static int timer_set_next_event(unsigned long evt,
+	struct clock_event_device *unused)
+{
+	unsigned long ctrl;
+	ctrl = readl(CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+	ctrl &= ~(CFG_TIMER_ENABLE | CFG_TIMER_INTMASK);
+	writel(ctrl, CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+	writel(evt, CFG_TIMER01_VABASE + REG_TIMER_RELOAD);
+	writel(CFG_TIMER_ONE_CONTROL, CFG_TIMER01_VABASE + REG_TIMER_CONTROL);
+
+	return 0;
+}
+
+static struct clock_event_device timer0_clockevent = {
+	.name		= "timer0",
+	.shift		= 32,
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= timer_set_mode,
+	.set_next_event	= timer_set_next_event,
+};
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t xm510_timer_interrupt(int irq, void *dev_id)
+{
+	if ((readl(CFG_TIMER01_VABASE+REG_TIMER_RIS)) & 0x1) {
+		writel(~0, CFG_TIMER01_VABASE + REG_TIMER_INTCLR);
+		timer0_clockevent.event_handler(&timer0_clockevent);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t xm510_freetimer_interrupt(int irq, void *dev_id)
+{
+	if ((readl(CFG_TIMER01_VABASE+REG_TIMER1_RIS)) & 0x1) {
+		free_timer_overflows++;
+		writel(~0, CFG_TIMER01_VABASE + REG_TIMER1_INTCLR);
+	}
+	return IRQ_HANDLED;
+}
+
+static struct irqaction xm510_timer_irq = {
+	.name		= "System Timer Tick",
+	.flags		= IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER,
+	.handler	= xm510_timer_interrupt,
+};
+
+static struct irqaction xm510_freetimer_irq = {
+	.name		= "Free Timer",
+	.flags		= IRQF_SHARED | IRQF_TIMER,
+	.handler	= xm510_freetimer_interrupt,
+};
+static cycle_t xm510_get_cycles(struct clocksource *cs)
+{
+	return ~readl(CFG_TIMER01_VABASE + REG_TIMER1_VALUE);
+}
+
+static struct clocksource xm510_clocksource = {
+	.name		= "timer1",
+	.rating		= 200,
+	.read		= xm510_get_cycles,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.shift		= 26,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init xm510_clocksource_init(void)
+{
+	writel(0, CFG_TIMER01_VABASE + REG_TIMER1_CONTROL);
+	writel(0xffffffff, CFG_TIMER01_VABASE + REG_TIMER1_RELOAD);
+	writel(0xffffffff, CFG_TIMER01_VABASE + REG_TIMER1_VALUE);
+	writel(CFG_TIMER_CONTROL, CFG_TIMER01_VABASE + REG_TIMER1_CONTROL);
+
+	xm510_clocksource.mult =
+		clocksource_khz2mult(timer1_clk_khz, xm510_clocksource.shift);
+
+	xm510_sched_clock_init((void *)CFG_TIMER01_VABASE
+			+ REG_TIMER1_VALUE, timer1_clk_hz);
+
+	clocksource_register(&xm510_clocksource);
+
+	return 0;
+}
+
+static void __init xm510_timer_init(void)
+{
+
+	edb_trace();
+
+
+	setup_irq(TIMER01_IRQ, &xm510_timer_irq);
+	setup_irq(TIMER01_IRQ, &xm510_freetimer_irq);
+
+	xm510_clocksource_init();
+	timer0_clockevent.mult =
+		div_sc(timer0_clk_hz, NSEC_PER_SEC, timer0_clockevent.shift);
+	timer0_clockevent.max_delta_ns =
+		clockevent_delta2ns(0xffffffff, &timer0_clockevent);
+	timer0_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xf, &timer0_clockevent);
+
+	timer0_clockevent.cpumask = cpumask_of(0);
+	clockevents_register_device(&timer0_clockevent);
+	edb_trace();
+}
+
+struct sys_timer xm510_timer = {
+	.init	= xm510_timer_init,
+};
+
+//static struct amba_pl011_data uart1_plat_data = {
+	//.dma_filter = pl330_filter,
+	//.dma_rx_param = (void *) DMACH_UART1_RX,
+	//.dma_tx_param = (void *) DMACH_UART1_TX,
+//};
+
+#define XM_AMBADEV_NAME(name) xm_ambadevice_##name
+
+#define XM_AMBA_DEVICE(name, busid, base, platdata)		\
+	static struct amba_device XM_AMBADEV_NAME(name) =	\
+	{\
+		.dev = {					\
+			.coherent_dma_mask = ~0,		\
+			.init_name = busid,			\
+			.platform_data = platdata,		\
+		},						\
+		.res = {					\
+			.start	= base##_BASE,			\
+			.end	= base##_BASE + 0x1000 - 1,	\
+			.flags	= IORESOURCE_IO,		\
+		},						\
+		.dma_mask	= ~0,				\
+		.irq		= { base##_IRQ, NO_IRQ }	\
+	}
+
+XM_AMBA_DEVICE(uart0, "uart:0", UART0, NULL);
+XM_AMBA_DEVICE(uart1, "uart:1", UART1, NULL);
+//XM_AMBA_DEVICE(uart1, "uart:1", UART1, &uart1_plat_data);
+
+static struct amba_device *amba_devs[] __initdata = {
+	&XM_AMBADEV_NAME(uart0),
+	&XM_AMBADEV_NAME(uart1),
+};
+
+/*
+ * These are fixed clocks.
+ */
+static struct clk uart_clk = {
+	.rate	= 24000000,
+};
+
+static struct clk_lookup lookups[] = {
+	{	/* UART0 */
+		.dev_id	= "uart:0",
+		.clk	= &uart_clk,
+	},
+	{	/* UART1 */
+		.dev_id	= "uart:1",
+		.clk	= &uart_clk,
+	},
+};
+
+void __init xm510_init(void)
+{
+	unsigned long i;
+	unsigned int tmp;
+
+	edb_trace();
+
+	tmp = readl(GPIO0_MULT_USE_EN);
+	tmp &= ~((1 << 4) | (1 << 5));
+	tmp |= (1 << 12) | (1 << 13);
+	writel(tmp, GPIO0_MULT_USE_EN);
+
+	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
+		edb_trace();
+		amba_device_register(amba_devs[i], &iomem_resource);
+	}
+}
+
+MACHINE_START(XM510, "xm510")
+	.boot_params	= PLAT_PHYS_OFFSET + 0x100,
+	.map_io		= xm510_map_io,
+	.init_irq	= xm510_init_irq,
+	.timer		= &xm510_timer,
+	.init_machine	= xm510_init,
+MACHINE_END
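xm510_cycles_2_ns() converts free-running timer ticks to nanoseconds with 10-bit fixed-point arithmetic: scale = (1000000 << 10) / clk_khz, then ns = (cyc * scale) >> 10. With the 24 MHz timer clock used here, scale = 1024000000 / 24000 = 42666, so one tick maps to 42666 / 1024 ≈ 41.666 ns against the exact period of 41.667 ns; the integer truncation loses about 16 µs per second, which is tolerable for sched_clock(). A standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long clk_khz = 24000;				/* 24 MHz timer */
		unsigned long scale = (1000000UL << 10) / clk_khz;	/* 42666 */
		unsigned long long cyc = 24000000ULL;			/* one second of ticks */

		/* prints "scale=42666 ns=999984375": ~1e9 ns per second of ticks */
		printf("scale=%lu ns=%llu\n", scale, (cyc * scale) >> 10);
		return 0;
	}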
diff -urN linux-3.0.101/arch/arm/mach-xm510/dma.c linux-3.0.101.xm510/arch/arm/mach-xm510/dma.c
--- linux-3.0.101/arch/arm/mach-xm510/dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,138 @@
+#include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+
+#include <mach/time.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <linux/clkdev.h>
+#include "clock.h"
+#include <mach/dma.h>
+
+
+static struct dma_pl330_peri xm510_dma_peri[] = {
+	[0] = {
+		.peri_id = 0,
+		.rqtype = MEMTOMEM,
+	},
+	[1] = {
+		.peri_id = 1,
+		.rqtype = MEMTOMEM,
+	},
+	[2] = {
+		.peri_id = DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	},
+	[3] = {
+		.peri_id = DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[4] = {
+		.peri_id = DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	},
+	[5] = {
+		.peri_id = DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[6] = {
+		.peri_id = DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	},
+	[7] = {
+		.peri_id = DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[8] = {
+		.peri_id = DMACH_I2S,
+		.rqtype = DEVTOMEM,
+	},
+	[9] = {
+		.peri_id = DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	},
+	[10] = {
+		.peri_id = DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[11] = {
+		.peri_id = DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	},
+	[12] = {
+		.peri_id = DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[13] = {
+		.peri_id = DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	},
+	[14] = {
+		.peri_id = DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[15] = {
+		.peri_id = DMACH_I2S_TX,
+		.rqtype = MEMTODEV,
+	},
+	[16] = {
+		.peri_id = DMACH_I2S_RX,
+		.rqtype = DEVTOMEM,
+	},
+	[17] = {
+		.peri_id = DMACH_MAX,
+		.rqtype = MEMTOMEM,
+	},
+};
+
+static struct dma_pl330_platdata xm510_dma_platdata = {
+	.nr_valid_peri = ARRAY_SIZE(xm510_dma_peri),
+	.peri = &xm510_dma_peri[0],
+};
+
+
+#define XM_AMBADEV_NAME(name) xm_ambadevice_##name
+
+#define XM_AMBA_DEVICE(name, busid, base, platdata)		\
+	static struct amba_device XM_AMBADEV_NAME(name) =	\
+	{\
+		.dev = {					\
+			.coherent_dma_mask = ~0,		\
+			.init_name = busid,			\
+			.platform_data = platdata,		\
+		},						\
+		.res = {					\
+			.start	= base##_BASE,			\
+			.end	= base##_BASE + 0x1000 - 1,	\
+			.flags	= IORESOURCE_IO,		\
+		},						\
+		.dma_mask	= ~0,				\
+		.irq		= { base##_IRQ, NO_IRQ }	\
+	}
+
+XM_AMBA_DEVICE(dmac, "dmac", DMAC, &xm510_dma_platdata);
+
+static struct clk dmac_clk = {
+	.rate	= 50000000,
+};
+
+static struct clk_lookup lookups[] = {
+	{	/* DMAC */
+		.dev_id	= "dmac",
+		.clk	= &dmac_clk,
+	},
+};
+static int __init xm510_dmac_init(void)
+{
+	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+	amba_device_register(&XM_AMBADEV_NAME(dmac), &iomem_resource);
+
+	return 0;
+}
+arch_initcall(xm510_dmac_init);
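With the DMAC registered as an AMBA device carrying this platdata, slave and memcpy channels are handed out through the generic dmaengine API; the commented-out uart1_plat_data block in core.c shows where a pl330 filter hook was intended to plug in. A sketch of requesting a memory-to-memory channel (xm510_dma_filter() is illustrative; a real filter would compare the pl330 peripheral id against the parameter):

	#include <linux/dmaengine.h>

	static bool xm510_dma_filter(struct dma_chan *chan, void *param)
	{
		/* Accept any channel; real code would check chan against 'param'. */
		return true;
	}

	static struct dma_chan *xm510_request_memcpy_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);	/* peri slots 0, 1 and 17 are MEMTOMEM */
		return dma_request_channel(mask, xm510_dma_filter, NULL);
	}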
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/clkdev.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/clkdev.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/clkdev.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/clkdev.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
+
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/debug-macro.S linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/debug-macro.S
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/debug-macro.S	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/debug-macro.S	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,7 @@
+	.macro	addruart, rp, rv
+	mov	\rp, #0x00030000
+	ldr	\rv, =0xFE030000	@ virtual base
+	orr	\rp, \rp, #0x10000000	@ physical base
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/dma.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/dma.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/dma.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/dma.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,25 @@
+#ifndef __DMA_H_
+#define __DMA_H_
+
+enum dma_ch {
+	DMACH_RES0,
+	DMACH_RES1,
+	DMACH_SPI0_TX,
+	DMACH_SPI0_RX,
+	DMACH_SPI1_TX,
+	DMACH_SPI1_RX,
+	DMACH_SPI2_TX,
+	DMACH_SPI2_RX,
+	DMACH_I2S,
+	DMACH_UART0_TX,
+	DMACH_UART0_RX,
+	DMACH_UART1_TX,
+	DMACH_UART1_RX,
+	DMACH_UART2_TX,
+	DMACH_UART2_RX,
+	DMACH_I2S_TX,
+	DMACH_I2S_RX,
+	DMACH_MAX,
+};
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/early-debug.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/early-debug.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/early-debug.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/early-debug.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,35 @@
+#ifndef __ASM_ARCH_EARLY_DEBUG_H
+#define __ASM_ARCH_EARLY_DEBUG_H
+
+#ifdef CONFIG_DEBUG_LL
+
+extern void printascii(const char *);
+extern void printhex8(long);
+extern void printch(char);
+
+#define edb_putc(x)	printch(x)
+#define edb_putstr(x)	printascii(x)
+#define edb_puthex(x)	printhex8(x)
+#define edb_putul(x)	printhex8(x)
+#define edb_trace() do {\
+	edb_putstr(__func__);\
+	edb_putstr("\t");\
+	edb_putstr("\t");\
+	edb_putstr("[");\
+	edb_putstr(__FILE__);\
+	edb_putstr(":");\
+	edb_putul(__LINE__);\
+	edb_putstr("]\n");\
+} while (0)
+
+#else
+
+#define edb_putc(x)
+#define edb_puthex(x)
+#define edb_putul(x)
+#define edb_putstr(x)
+#define edb_trace()
+
+#endif
+
+#endif /* __ASM_ARCH_EARLY_DEBUG_H */
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/entry-macro.S linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/entry-macro.S
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/entry-macro.S	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/entry-macro.S	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,45 @@
+/*
+ * arch/arm/mach-xm510/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for the XM510 platform
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <asm/hardware/vic.h>
+
+	.macro	disable_fiq
+	.endm
+
+	.macro	get_irqnr_preamble, base, tmp
+	ldr	\base, =0xFE110000
+	.endm
+
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+	/* get masked status */
+	ldr	\irqstat, [\base, #REG_INTC_IRQSTATUS]
+	mov	\irqnr, #0
+	teq	\irqstat, #0
+	beq	1003f
+
+1001:	tst	\irqstat, #15
+	bne	1002f
+	add	\irqnr, \irqnr, #4
+	movs	\irqstat, \irqstat, lsr #4
+	bne	1001b
+1002:	tst	\irqstat, #1
+	bne	1003f
+	add	\irqnr, \irqnr, #1
+	movs	\irqstat, \irqstat, lsr #1
+	bne	1002b
+1003:	/* EQ will be set if no irqs pending */
+
+@	clz	\irqnr, \irqstat
+@1003:	/* EQ will be set if we reach MAXIRQNUM */
+	.endm
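The scan in get_irqnr_and_base returns the index of the lowest set bit in the 32-bit pending mask: it skips four bits at a time while the low nibble is clear, then walks single bits until it reaches the pending source. The same decode written in C, for reference:

	/* C equivalent of the assembler loop above: lowest pending IRQ wins. */
	static int xm510_decode_irq(unsigned int irqstat)
	{
		int irqnr = 0;

		if (!irqstat)
			return -1;			/* nothing pending */
		while (!(irqstat & 0xf)) {		/* skip clear nibbles */
			irqnr += 4;
			irqstat >>= 4;
		}
		while (!(irqstat & 0x1)) {		/* walk to the first set bit */
			irqnr += 1;
			irqstat >>= 1;
		}
		return irqnr;
	}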
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/hardware.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/hardware.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/hardware.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/hardware.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,8 @@
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+
+#include <mach/platform.h>
+#include <mach/io.h>
+
+#endif
+
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/io.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/io.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/io.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/io.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,31 @@
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io(a)		__typesafe_io(a)
+#define __mem_pci(a)	(a)
+
+#define XM510_IOCH1_PHYS	0x10000000	/* 0x1000_0000 ~ 0x1010_0000 */
+#define XM510_IOCH2_PHYS	0x20000000	/* 0x2000_0000 ~ 0x2010_0000 */
+#define XM510_IOCH1_SIZE	0x100000	/* each channel maps 1M of registers */
+#define XM510_IOCH2_SIZE	0x100000
+
+#define XM510_IOCH1_VIRT	0xFE000000
+#define XM510_IOCH2_VIRT	(XM510_IOCH1_VIRT + XM510_IOCH1_SIZE)
+
+/*
+ * physical addr			<--->	virtual addr
+ * [0x1000_0000 ~ 0x1010_0000)	<--->	[0xFE00_0000 ~ 0xFE10_0000)
+ * [0x2000_0000 ~ 0x2010_0000)	<--->	[0xFE10_0000 ~ 0xFE20_0000)
+ */
+/*#define IO_ADDRESS(x) (((((x) >> 24) - 0x10) << 16) + ((x) & 0xFFFFF) + XM510_IOCH1_VIRT)*/
+
+#define IO_IOCH1_OFFSET	(XM510_IOCH1_VIRT - XM510_IOCH1_PHYS)
+#define IO_IOCH2_OFFSET	(XM510_IOCH2_VIRT - XM510_IOCH2_PHYS)
+
+#define IO_ADDRESS(x)	((x) >= XM510_IOCH2_PHYS ? (x) + IO_IOCH2_OFFSET\
+				: (x) + IO_IOCH1_OFFSET)
+
+
+#endif
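IO_ADDRESS() selects the static mapping by physical address: anything at or above XM510_IOCH2_PHYS (0x2000_0000) is shifted into the second 1M window, everything else into the first. Two concrete translations using the register bases from platform.h, which line up with the hard-coded virtual addresses in debug-macro.S and entry-macro.S:

	IO_ADDRESS(UART0_BASE)	/* 0x10030000 + (0xFE000000 - 0x10000000) = 0xFE030000 */
	IO_ADDRESS(INTC_BASE)	/* 0x20010000 + (0xFE100000 - 0x20000000) = 0xFE110000 */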
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/irqs.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/irqs.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/irqs.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/irqs.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,18 @@
+#ifndef __HI_IRQS_H__
+#define __HI_IRQS_H__
+
+#define XM510_IRQ_START	(0)
+
+#define TIMER01_IRQ	(XM510_IRQ_START + 4)
+#define TIMER23_IRQ	(XM510_IRQ_START + 5)
+#define UART0_IRQ	(XM510_IRQ_START + 0)
+#define UART1_IRQ	(XM510_IRQ_START + 1)
+#define UART2_IRQ	(XM510_IRQ_START + 2)
+#define DMAC_IRQ	(XM510_IRQ_START + 10)
+#define GMAC_IRQ	(XM510_IRQ_START + 3)
+#define SDIO1_IRQ	(XM510_IRQ_START + 24)
+#define SDIO2_IRQ	(XM510_IRQ_START + 25)
+
+#define NR_IRQS		(XM510_IRQ_START + 32)
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/memory.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/memory.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/memory.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/memory.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,9 @@
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PLAT_PHYS_OFFSET	UL(0x80000000)
+
+#endif /* __ASM_ARCH_MEMORY_H */
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/platform.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/platform.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/platform.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/platform.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,60 @@
+#ifndef __HI_CHIP_REGS_H__
+#define __HI_CHIP_REGS_H__
+
+#include <mach/io.h>
+
+/*#define DDR_BASE	0x80000000*/
+/*#define DDRC_BASE	0x20110000*/
+/*#define IOCONFIG_BASE	0x200F0000*/
+#define UART0_BASE	0x10030000
+#define UART1_BASE	0x10040000
+#define UART2_BASE	0x10050000
+#define SYS_CTRL_BASE	0x20000000
+/*#define WDG_BASE	0x20040000*/
+/*#define CRG_REG_BASE	0x20030000*/
+#define TIMER23_BASE	0x100D0000
+#define TIMER01_BASE	0x100C0000
+#define INTC_BASE	0x20010000
+#define DMAC_BASE	0x20020000
+#define GPIO_BASE	0x10020000
+#define GMAC_BASE	0x20040000
+#define SDIO1_BASE	0x50000000
+#define SDIO2_BASE	0x50010000
+
+#define GPIO78_MULT_USE_EN	0x01C
+
+#define REG_INTC_IRQSTATUS	0x000
+#define REG_INTC_FIQSTATUS	0x004
+#define REG_INTC_RAWSTATUS	0x008
+#define REG_INTC_INTSELECT	0x00C
+#define REG_INTC_INTENABLE	0x010
+#define REG_INTC_INTENCLEAR	0x014
+#define REG_INTC_SOFTINT	0x018
+#define REG_INTC_SOFTINTCLEAR	0x01C
+
+#define INTNR_IRQ_START		0
+#define INTNR_IRQ_END		31
+
+#define REG_TIMER_RELOAD	0x000
+#define REG_TIMER_VALUE		0x004
+#define REG_TIMER_CONTROL	0x008
+#define REG_TIMER_INTCLR	0x00C
+#define REG_TIMER_RIS		0x010
+#define REG_TIMER_MIS		0x014
+#define REG_TIMER_BGLOAD	0x018
+
+#define REG_TIMER1_RELOAD	0x020
+#define REG_TIMER1_VALUE	0x024
+#define REG_TIMER1_CONTROL	0x028
+#define REG_TIMER1_INTCLR	0x02C
+#define REG_TIMER1_RIS		0x030
+#define REG_TIMER1_MIS		0x034
+#define REG_TIMER1_BGLOAD	0x038
+
+#define REG_SC_SYSRES	0x4
+
+#define REG_SYS_SOFT_RSTEN	0x30
+#define REG_SYS_SOFT_RST	0x34
+
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/system.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/system.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/system.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/system.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,23 @@
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+#include <linux/io.h>
+#include <mach/platform.h>
+
+
+#define ioaddr_sys(x)	IO_ADDRESS(SYS_CTRL_BASE + (x))
+static inline void arch_idle(void)
+{
+	/*
+	 * This should do all the clock switching
+	 * and wait for interrupt tricks
+	 */
+	cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+	writel(1, ioaddr_sys(REG_SYS_SOFT_RSTEN));
+	writel(0xca110000, ioaddr_sys(REG_SYS_SOFT_RST));
+}
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/time.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/time.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/time.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/time.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,23 @@
+#ifndef ASM_HISI_TIMER
+#define ASM_HISI_TIMER
+
+#define CFG_TIMER01_VABASE	IO_ADDRESS(TIMER01_BASE)
+#define CFG_TIMER23_VABASE	IO_ADDRESS(TIMER23_BASE)
+
+#define CFG_TIMER_CONTROL	(CFG_TIMER_ENABLE | CFG_TIMER_PERIODIC\
+				| CFG_TIMER_INTMASK | CFG_TIMER_32BIT)
+#define CFG_TIMER_ONE_CONTROL	(CFG_TIMER_ENABLE | CFG_TIMER_INTMASK\
+				| CFG_TIMER_32BIT | CFG_TIMER_ONESHOT)
+
+#define CFG_TIMER_ENABLE	(1 << 7)
+#define CFG_TIMER_PERIODIC	(1 << 6)
+#define CFG_TIMER_INTMASK	(1 << 5)
+#define CFG_TIMER_32BIT		(1 << 1)
+#define CFG_TIMER_ONESHOT	(1 << 0)
+
+#define CFG_TIMER_CLK		24000000
+
+#define CFG_TIMER_INTNR		TIMER01_IRQ
+
+extern unsigned long long sched_clock(void);
+#endif
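The composite control words decode as CFG_TIMER_CONTROL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 1) = 0xE2 (enabled, periodic, interrupt on, 32-bit counter) and CFG_TIMER_ONE_CONTROL = (1 << 7) | (1 << 5) | (1 << 1) | (1 << 0) = 0xA3 (enabled, one-shot, interrupt on, 32-bit). Despite its name, setting CFG_TIMER_INTMASK lets the interrupt through: the register layout here matches the ARM SP804 dual timer, where bit 5 is Interrupt Enable.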
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/timex.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/timex.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/timex.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/timex.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,6 @@
+#ifndef __XM_TIMEX__
+#define __XM_TIMEX__
+
+#define CLOCK_TICK_RATE	24000000
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/uncompress.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/uncompress.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/uncompress.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/uncompress.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,33 @@
+#ifndef __XM_UNCOMPRESS_H__
+#define __XM_UNCOMPRESS_H__
+#include <mach/platform.h>
+
+#define AMBA_UART_DR	(*(unsigned long *)(UART0_BASE + 0x0))
+#define AMBA_UART_LCRH	(*(unsigned long *)(UART0_BASE + 0x2c))
+#define AMBA_UART_CR	(*(unsigned long *)(UART0_BASE + 0x30))
+#define AMBA_UART_FR	(*(unsigned long *)(UART0_BASE + 0x18))
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+	while (AMBA_UART_FR & (1 << 5))
+		barrier();
+
+	AMBA_UART_DR = c;
+}
+
+static inline void flush(void)
+{
+	while (AMBA_UART_FR & (1 << 3))
+		barrier();
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/include/mach/vmalloc.h linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/vmalloc.h
--- linux-3.0.101/arch/arm/mach-xm510/include/mach/vmalloc.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/include/mach/vmalloc.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,6 @@
+#ifndef __XM_VMALLOC_H__
+#define __XM_VMALLOC_H__
+
+#define VMALLOC_END	(PAGE_OFFSET + 0x3e000000)
+
+#endif
diff -urN linux-3.0.101/arch/arm/mach-xm510/Kconfig linux-3.0.101.xm510/arch/arm/mach-xm510/Kconfig
--- linux-3.0.101/arch/arm/mach-xm510/Kconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,19 @@
+menu "xm510 board feature"
+
+config MACH_XM510
+	bool "Support xm510 platform"
+	select CPU_V7
+	select PL330
+	help
+	  Includes support for the HiSilicon-derived xm510 platform.
+
+	  This includes specific configurations for the module and
+	  its peripherals.
+
+config DEFAULT_BUSCLK
+	int "DEFAULT_BUSCLK"
+	default "50000000"
+	help
+	  Default AHB clock rate
+
+endmenu
diff -urN linux-3.0.101/arch/arm/mach-xm510/Makefile linux-3.0.101.xm510/arch/arm/mach-xm510/Makefile
--- linux-3.0.101/arch/arm/mach-xm510/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,2 @@
+obj-y		:= core.o clock.o
+obj-$(CONFIG_DMA_ENGINE)	+= dma.o
diff -urN linux-3.0.101/arch/arm/mach-xm510/Makefile.boot linux-3.0.101.xm510/arch/arm/mach-xm510/Makefile.boot
--- linux-3.0.101/arch/arm/mach-xm510/Makefile.boot	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/mach-xm510/Makefile.boot	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,3 @@
+zreladdr-y	:= 0x80008000
+params_phys-y	:= 0x00000100
+initrd_phys-y	:= 0x00800000
diff -urN linux-3.0.101/arch/arm/Makefile linux-3.0.101.xm510/arch/arm/Makefile
--- linux-3.0.101/arch/arm/Makefile	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -196,6 +196,7 @@
 machine-$(CONFIG_MACH_SPEAR310)		:= spear3xx
 machine-$(CONFIG_MACH_SPEAR320)		:= spear3xx
 machine-$(CONFIG_MACH_SPEAR600)		:= spear6xx
+machine-$(CONFIG_ARCH_XM510)		:= xm510

 # Platform directory name. This list is sorted alphanumerically
 # by CONFIG_* macro name.
diff -urN linux-3.0.101/arch/arm/tools/mach-types linux-3.0.101.xm510/arch/arm/tools/mach-types
--- linux-3.0.101/arch/arm/tools/mach-types	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/arch/arm/tools/mach-types	2016-05-17 09:52:17.000000000 +0300
@@ -1113,3 +1113,4 @@
 thales_adc		MACH_THALES_ADC		THALES_ADC		3492
 ubisys_p9d_evp		MACH_UBISYS_P9D_EVP	UBISYS_P9D_EVP		3493
 atdgp318		MACH_ATDGP318		ATDGP318		3494
+xm510			MACH_XM510		XM510			8888
diff -urN linux-3.0.101/drivers/amba/bus.c linux-3.0.101.xm510/drivers/amba/bus.c
--- linux-3.0.101/drivers/amba/bus.c	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/amba/bus.c	2016-05-17 09:52:17.000000000 +0300
@@ -632,7 +632,6 @@
 	for (cid = 0, i = 0; i < 4; i++)
 		cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
 			(i * 8);
-
 	amba_put_disable_pclk(dev);

 	if (cid == AMBA_CID)
diff -urN linux-3.0.101/drivers/dma/acpi-dma.c linux-3.0.101.xm510/drivers/dma/acpi-dma.c
--- linux-3.0.101/drivers/dma/acpi-dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/acpi-dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,445 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of-dma.c
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *	    Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+static LIST_HEAD(acpi_dma_list);
+static DEFINE_MUTEX(acpi_dma_lock);
+
+/**
+ * acpi_dma_parse_resource_group - match device and parse resource group
+ * @grp: CSRT resource group
+ * @adev: ACPI device to match with
+ * @adma: struct acpi_dma of the given DMA controller
+ *
+ * Returns 1 on success, 0 when no information is available, or appropriate
+ * errno value on error.
+ *
+ * In order to match a device from the DSDT table to the corresponding CSRT
+ * device we use MMIO address and IRQ.
+ */
+static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+		struct acpi_device *adev, struct acpi_dma *adma)
+{
+	const struct acpi_csrt_shared_info *si;
+	struct list_head resource_list;
+	struct resource_list_entry *rentry;
+	resource_size_t mem = 0, irq = 0;
+	u32 vendor_id;
+	int ret;
+
+	if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+		return -ENODEV;
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+	if (ret <= 0)
+		return 0;
+
+	list_for_each_entry(rentry, &resource_list, node) {
+		if (resource_type(&rentry->res) == IORESOURCE_MEM)
+			mem = rentry->res.start;
+		else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
+			irq = rentry->res.start;
+	}
+
+	acpi_dev_free_resource_list(&resource_list);
+
+	/* Consider initial zero values as resource not found */
+	if (mem == 0 && irq == 0)
+		return 0;
+
+	si = (const struct acpi_csrt_shared_info *)&grp[1];
+
+	/* Match device by MMIO and IRQ */
+	if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+		return 0;
+
+	vendor_id = le32_to_cpu(grp->vendor_id);
+	dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
+		(char *)&vendor_id, grp->device_id, grp->revision);
+
+	/* Check if the request line range is available */
+	if (si->base_request_line == 0 && si->num_handshake_signals == 0)
+		return 0;
+
+	adma->base_request_line = si->base_request_line;
+	adma->end_request_line = si->base_request_line +
+				 si->num_handshake_signals - 1;
+
+	dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
+		adma->base_request_line, adma->end_request_line);
+
+	return 1;
+}
+
+/**
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
+ * @adev: ACPI device to match with
+ * @adma: struct acpi_dma of the given DMA controller
+ *
+ * CSRT or Core System Resources Table is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular DMA controllers might be described
+ * here.
+ *
+ * We are using this table to get the request line range of the specific DMA
+ * controller to be used later.
+ *
+ */
+static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
+{
+	struct acpi_csrt_group *grp, *end;
+	struct acpi_table_csrt *csrt;
+	acpi_status status;
+	int ret;
+
+	status = acpi_get_table(ACPI_SIG_CSRT, 0,
+				(struct acpi_table_header **)&csrt);
+	if (ACPI_FAILURE(status)) {
+		if (status != AE_NOT_FOUND)
+			dev_warn(&adev->dev, "failed to get the CSRT table\n");
+		return;
+	}
+
+	grp = (struct acpi_csrt_group *)(csrt + 1);
+	end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
+	while (grp < end) {
+		ret = acpi_dma_parse_resource_group(grp, adev, adma);
+		if (ret < 0) {
+			dev_warn(&adev->dev,
+				 "error in parsing resource group\n");
+			return;
+		}
+
+		grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+	}
+}
+
+/**
+ * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
+ * @dev: struct device of DMA controller
+ * @acpi_dma_xlate: translation function which converts a dma specifier
+ *		into a dma_chan structure
+ * @data: pointer to controller specific data to be used by
+ *		translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate acpi_dma_controller_free()
+ * call.
+ */
+int acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	struct acpi_device *adev;
+	struct acpi_dma *adma;
+
+	if (!dev || !acpi_dma_xlate)
+		return -EINVAL;
+
+	/* Check if the device was enumerated by ACPI */
+	if (!ACPI_HANDLE(dev))
+		return -EINVAL;
+
+	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+		return -EINVAL;
+
+	adma = kzalloc(sizeof(*adma), GFP_KERNEL);
+	if (!adma)
+		return -ENOMEM;
+
+	adma->dev = dev;
+	adma->acpi_dma_xlate = acpi_dma_xlate;
+	adma->data = data;
+
+	acpi_dma_parse_csrt(adev, adma);
+
+	/* Now queue acpi_dma controller structure in list */
+	mutex_lock(&acpi_dma_lock);
+	list_add_tail(&adma->dma_controllers, &acpi_dma_list);
+	mutex_unlock(&acpi_dma_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
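A DMA controller driver opts into these helpers from its probe routine,
typically pairing acpi_dma_simple_xlate() (defined later in this file) with a
filter function. A minimal sketch of that pattern; the xx_* names and the
filter logic are illustrative assumptions, not part of this patch:

/* hypothetical controller driver glue, after its dma_device is set up */
static bool xx_filter(struct dma_chan *chan, void *param)
{
	/* param is the struct acpi_dma_spec matched by the helpers */
	struct acpi_dma_spec *dma_spec = param;

	return chan->device->dev == dma_spec->dev; /* plus slave_id checks */
}

static struct acpi_dma_filter_info xx_filter_info;

static int xx_probe(struct platform_device *pdev)
{
	dma_cap_set(DMA_SLAVE, xx_filter_info.dma_cap);
	xx_filter_info.filter_fn = xx_filter;
	return devm_acpi_dma_controller_register(&pdev->dev,
						 acpi_dma_simple_xlate,
						 &xx_filter_info);
}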
+
+/**
+ * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list
+ * @dev: struct device of DMA controller
+ *
+ * Memory allocated by acpi_dma_controller_register() is freed here.
+ */
+int acpi_dma_controller_free(struct device *dev)
+{
+	struct acpi_dma *adma;
+
+	if (!dev)
+		return -EINVAL;
+
+	mutex_lock(&acpi_dma_lock);
+
+	list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
+		if (adma->dev == dev) {
+			list_del(&adma->dma_controllers);
+			mutex_unlock(&acpi_dma_lock);
+			kfree(adma);
+			return 0;
+		}
+
+	mutex_unlock(&acpi_dma_lock);
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
+
+static void devm_acpi_dma_release(struct device *dev, void *res)
+{
+	acpi_dma_controller_free(dev);
+}
+
+/**
+ * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
+ * @dev: device that is registering this DMA controller
+ * @acpi_dma_xlate: translation function
+ * @data: pointer to controller specific data
+ *
+ * Managed acpi_dma_controller_register(). DMA controllers registered by this
+ * function are automatically freed on driver detach. See
+ * acpi_dma_controller_register() for more information.
+ */
+int devm_acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	void *res;
+	int ret;
+
+	res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
+	if (ret) {
+		devres_free(res);
+		return ret;
+	}
+	devres_add(dev, res);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
+
+/**
+ * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ *
+ * Unregister a DMA controller registered with
+ * devm_acpi_dma_controller_register(). Normally this function will not need to
+ * be called and the resource management code will ensure that the resource is
+ * freed.
+ */
+void devm_acpi_dma_controller_free(struct device *dev)
+{
+	WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+
+/**
+ * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
+ * @adma: struct acpi_dma of DMA controller
+ * @dma_spec: dma specifier to update
+ *
+ * Returns 0 if no information is available, -1 on mismatch, and 1 otherwise.
+ *
+ * According to the ACPI 5.0 Specification, Table 6-170 "Fixed DMA Resource
+ * Descriptor":
+ *	DMA Request Line bits is a platform-relative number uniquely
+ *	identifying the request line assigned. Request line-to-Controller
+ *	mapping is done in a controller-specific OS driver.
+ * That's why we can safely adjust slave_id when the appropriate controller is
+ * found.
+ */
+static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
+		struct acpi_dma_spec *dma_spec)
+{
+	/* Set link to the DMA controller device */
+	dma_spec->dev = adma->dev;
+
+	/* Check if the request line range is available */
+	if (adma->base_request_line == 0 && adma->end_request_line == 0)
+		return 0;
+
+	/* Check if slave_id falls into the range */
+	if (dma_spec->slave_id < adma->base_request_line ||
+	    dma_spec->slave_id > adma->end_request_line)
+		return -1;
+
+	/*
+	 * Here we adjust slave_id. It should be a number relative to the
+	 * base request line.
+	 */
+	dma_spec->slave_id -= adma->base_request_line;
+
+	return 1;
+}
+
+struct acpi_dma_parser_data {
+	struct acpi_dma_spec dma_spec;
+	size_t index;
+	size_t n;
+};
+
+/**
+ * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
+ * @res: struct acpi_resource to get FixedDMA resources from
+ * @data: pointer to a helper struct acpi_dma_parser_data
+ */
+static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
+{
+	struct acpi_dma_parser_data *pdata = data;
+
+	if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
+		struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;
+
+		if (pdata->n++ == pdata->index) {
+			pdata->dma_spec.chan_id = dma->channels;
+			pdata->dma_spec.slave_id = dma->request_lines;
+		}
+	}
+
+	/* Tell the ACPI core to skip this resource */
+	return 1;
+}
+
+/**
+ * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @index: index of FixedDMA descriptor for @dev
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+		size_t index)
+{
+	struct acpi_dma_parser_data pdata;
+	struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
+	struct list_head resource_list;
+	struct acpi_device *adev;
+	struct acpi_dma *adma;
+	struct dma_chan *chan = NULL;
+	int found;
+
+	/* Check if the device was enumerated by ACPI */
+	if (!dev || !ACPI_HANDLE(dev))
+		return NULL;
+
+	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+		return NULL;
+
+	memset(&pdata, 0, sizeof(pdata));
+	pdata.index = index;
+
+	/* Initial values for the request line and channel */
+	dma_spec->chan_id = -1;
+	dma_spec->slave_id = -1;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(adev, &resource_list,
+			       acpi_dma_parse_fixed_dma, &pdata);
+	acpi_dev_free_resource_list(&resource_list);
+
+	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
+		return NULL;
+
+	mutex_lock(&acpi_dma_lock);
+
+	list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
+		/*
+		 * We are not going to call the translation function if
+		 * slave_id doesn't fall into the request range.
+		 */
+		found = acpi_dma_update_dma_spec(adma, dma_spec);
+		if (found < 0)
+			continue;
+		chan = adma->acpi_dma_xlate(dma_spec, adma);
+		/*
+		 * Try to get a channel only from the DMA controller that
+		 * matches the slave_id. See acpi_dma_update_dma_spec()
+		 * description for the details.
+		 */
+		if (found > 0 || chan)
+			break;
+	}
+
+	mutex_unlock(&acpi_dma_lock);
+	return chan;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
+
+/**
+ * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @name: represents corresponding FixedDMA descriptor for @dev
+ *
+ * In order to support both Device Tree and ACPI in a single driver we
+ * translate the names "tx" and "rx" here based on the most common case where
+ * the first FixedDMA descriptor is TX and second is RX.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+		const char *name)
+{
+	size_t index;
+
+	if (!strcmp(name, "tx"))
+		index = 0;
+	else if (!strcmp(name, "rx"))
+		index = 1;
+	else
+		return NULL;
+
+	return acpi_dma_request_slave_chan_by_index(dev, index);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
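On the consumer side, a slave driver that already uses "tx"/"rx" channel names
for Device Tree can reuse them unchanged under ACPI. A hedged sketch of that
call pattern (error handling trimmed; dev is assumed to be the slave device):

/* request the TX FixedDMA descriptor (index 0 by the convention above) */
struct dma_chan *txchan;

txchan = acpi_dma_request_slave_chan_by_name(dev, "tx");
if (!txchan)
	dev_warn(dev, "no ACPI DMA channel, falling back to PIO\n");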
+
+/**
+ * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
+ * @dma_spec: pointer to ACPI DMA specifier
+ * @adma: pointer to ACPI DMA controller data
+ *
+ * A simple translation function for ACPI based devices. Passes &struct
+ * dma_spec to the DMA controller driver provided filter function. Returns
+ * pointer to the channel if found or %NULL otherwise.
+ */
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+		struct acpi_dma *adma)
+{
+	struct acpi_dma_filter_info *info = adma->data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
diff -urN linux-3.0.101/drivers/dma/at_xdmac.c linux-3.0.101.xm510/drivers/dma/at_xdmac.c
--- linux-3.0.101/drivers/dma/at_xdmac.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/at_xdmac.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,1524 @@
+/*
+ * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
+ *
+ * Copyright (C) 2014 Atmel Corporation
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/barrier.h>
+#include <dt-bindings/dma/at91.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include "dmaengine.h"
+
+/* Global registers */
+#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
+#define AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)	/* Number of Channels Minus One */
+#define AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)	/* Number of Bytes */
+#define AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
+#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
+#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
+#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
+#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
+#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
+#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
+#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
+#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
+#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
+#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
+#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
+#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
+#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
+#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
+#define AT_XDMAC_GSWS		0x3C	/* Global channel Software Request Status Register */
+#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
+#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */
+
+/* Channel relative registers offsets */
+#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
+#define AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
+#define AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
+#define AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
+#define AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
+#define AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
+#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
+#define AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
+#define AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
+#define AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
+#define AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
+#define AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
+#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
+#define AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
+#define AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
+#define AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
+#define AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
+#define AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
+#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
+#define AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
+#define AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
+#define AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
+#define AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
+#define AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
+#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
+#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
+#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
+#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
+#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
+#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
+#define AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
+#define AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
+#define AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
+#define AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
+#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
+#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
+#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
+#define AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
+#define AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
+#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
+#define AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
+#define AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
+#define AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
+#define AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
+#define AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
+#define AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
+#define AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
+#define AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
+#define AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
+#define AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
+#define AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
+#define AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
+#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
+#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
+#define AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
+#define AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
+#define AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
+#define AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
+#define AT_XDMAC_CC_DWIDTH_OFFSET	11
+#define AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
+#define AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
+#define AT_XDMAC_CC_DWIDTH_BYTE		0x0
+#define AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
+#define AT_XDMAC_CC_DWIDTH_WORD		0x2
+#define AT_XDMAC_CC_DWIDTH_DWORD	0x3
+#define AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
+#define AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
+#define AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
+#define AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
+#define AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
+#define AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
+#define AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
+#define AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
+#define AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
+#define AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
+#define AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
+#define AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
+#define AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
+#define AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
+#define AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
+#define AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
+#define AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
+#define AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
+#define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
+#define AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
+#define AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
+#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
+#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
+#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
+#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
+
+#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */
+
+/* Microblock control members */
+#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
+#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
+#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
+#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
+#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
+#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
+#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
+#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */
+
+#define AT_XDMAC_MAX_CHAN	0x20
+
+#define AT_XDMAC_DMA_BUSWIDTHS\
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum atc_status {
+	AT_XDMAC_CHAN_IS_CYCLIC = 0,
+	AT_XDMAC_CHAN_IS_PAUSED,
+};
+
+/* ----- Channels ----- */
+struct at_xdmac_chan {
+	struct dma_chan		chan;
+	void __iomem		*ch_regs;
+	u32			mask;	/* Channel Mask */
+	u32			cfg[2];	/* Channel Configuration Register */
+	#define AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predefined dev to mem channel conf */
+	#define AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predefined mem to dev channel conf */
+	u8			perid;	/* Peripheral ID */
+	u8			perif;	/* Peripheral Interface */
+	u8			memif;	/* Memory Interface */
+	u32			per_src_addr;
+	u32			per_dst_addr;
+	u32			save_cc;
+	u32			save_cim;
+	u32			save_cnda;
+	u32			save_cndc;
+	unsigned long		status;
+	struct tasklet_struct	tasklet;
+
+	spinlock_t		lock;
+
+	struct list_head	xfers_list;
+	struct list_head	free_descs_list;
+};
+
+
+/* ----- Controller ----- */
+struct at_xdmac {
+	struct dma_device	dma;
+	void __iomem		*regs;
+	int			irq;
+	struct clk		*clk;
+	u32			save_gim;
+	u32			save_gs;
+	struct dma_pool		*at_xdmac_desc_pool;
+	struct at_xdmac_chan	chan[0];
+};
+
+
+/* ----- Descriptors ----- */
+
+/* Linked List Descriptor */
+struct at_xdmac_lld {
+	dma_addr_t	mbr_nda;	/* Next Descriptor Member */
+	u32		mbr_ubc;	/* Microblock Control Member */
+	dma_addr_t	mbr_sa;		/* Source Address Member */
+	dma_addr_t	mbr_da;		/* Destination Address Member */
+	u32		mbr_cfg;	/* Configuration Register */
+};
+
+
+struct at_xdmac_desc {
+	struct at_xdmac_lld		lld;
+	enum dma_transfer_direction	direction;
+	struct dma_async_tx_descriptor	tx_dma_desc;
+	struct list_head		desc_node;
+	/* Following members are only used by the first descriptor */
+	bool				active_xfer;
+	unsigned int			xfer_size;
+	struct list_head		descs_list;
+	struct list_head		xfer_node;
+};
+
+static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
+{
+	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
+}
+
+#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
+#define at_xdmac_write(atxdmac, reg, value) \
+	writel_relaxed((value), (atxdmac)->regs + (reg))
+
+#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
+#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
+
+static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
+{
+	return container_of(dchan, struct at_xdmac_chan, chan);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
+{
+	return container_of(ddev, struct at_xdmac, dma);
+}
+
+static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
+}
+
+static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
+{
+	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+}
+
+static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
+{
+	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+}
+
+static inline int at_xdmac_csize(u32 maxburst)
+{
+	int csize;
+
+	csize = ffs(maxburst) - 1;
+	if (csize > 4)
+		csize = -EINVAL;
+
+	return csize;
+};
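To make the encoding above concrete: at_xdmac_csize() maps a dmaengine maxburst
count to the log2 chunk-size code that AT_XDMAC_CC_CSIZE() shifts into the CC
register, rejecting bursts above 16 beats. A short worked example, derived
only from the code above (not measured on hardware):

/* maxburst = 8 beats: ffs(8) - 1 = 3, so the CSIZE field is 3 */
int csize = at_xdmac_csize(8);	/* -> 3 */
u32 cc = AT_XDMAC_CC_CSIZE(csize) |
	 AT_XDMAC_CC_DWIDTH(AT_XDMAC_CC_DWIDTH_WORD);
/* maxburst = 32: ffs(32) - 1 = 5 > 4, so -EINVAL is returned */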
+
+static inline u8 at_xdmac_get_dwidth(u32 cfg)
+{
+	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
+};
+
+static unsigned int init_nr_desc_per_channel = 64;
+module_param(init_nr_desc_per_channel, uint, 0644);
+MODULE_PARM_DESC(init_nr_desc_per_channel,
+		 "initial descriptors per channel (default: 64)");
+
+
+static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
+{
+	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
+}
+
+static void at_xdmac_off(struct at_xdmac *atxdmac)
+{
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
+
+	/* Wait until all channels are disabled. */
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
+		cpu_relax();
+
+	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
+}
+
+/* Call with lock held. */
+static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
+				struct at_xdmac_desc *first)
+{
+	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	u32 reg;
+
+	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
+
+	if (at_xdmac_chan_is_enabled(atchan))
+		return;
+
+	/* Set transfer as active to not try to start it again. */
+	first->active_xfer = true;
+
+	/* Tell xdmac where to get the first descriptor. */
+	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
+	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
+	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
+
+	/*
+	 * When doing a non-cyclic transfer we need to use the next
+	 * descriptor view 2 since some fields of the configuration register
+	 * depend on transfer size and src/dest addresses.
+	 */
+	if (at_xdmac_chan_is_cyclic(atchan)) {
+		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
+		at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
+	} else {
+		/*
+		 * No need to write the AT_XDMAC_CC reg, it will be done when
+		 * the descriptor is fetched.
+		 */
+		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
+	}
+
+	reg |= AT_XDMAC_CNDC_NDDUP
+	       | AT_XDMAC_CNDC_NDSUP
+	       | AT_XDMAC_CNDC_NDE;
+	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
+
+	dev_vdbg(chan2dev(&atchan->chan),
+		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
+	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+	/*
+	 * There is no end of list when doing cyclic DMA; we need to get
+	 * an interrupt after each period.
+	 */
+	if (at_xdmac_chan_is_cyclic(atchan))
+		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+				    reg | AT_XDMAC_CIE_BIE);
+	else
+		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+				    reg | AT_XDMAC_CIE_LIE);
+	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
+	dev_vdbg(chan2dev(&atchan->chan),
+		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
+	wmb();
+	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+
+	dev_vdbg(chan2dev(&atchan->chan),
+		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+}
+
+static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	cookie = dma_cookie_assign(tx);
+
+	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
+		 __func__, atchan, desc);
+	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
+	if (list_is_singular(&atchan->xfers_list))
+		at_xdmac_start_xfer(atchan, desc);
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	return cookie;
+}
+
+static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
+						 gfp_t gfp_flags)
+{
+	struct at_xdmac_desc *desc;
+	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
+	dma_addr_t phys;
+
+	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+	if (desc) {
+		memset(desc, 0, sizeof(*desc));
+		INIT_LIST_HEAD(&desc->descs_list);
+		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
+		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
+		desc->tx_dma_desc.phys = phys;
+	}
+
+	return desc;
+}
+
+/* Call must be protected by lock. */
+static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac_desc *desc;
+
+	if (list_empty(&atchan->free_descs_list)) {
+		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
+	} else {
+		desc = list_first_entry(&atchan->free_descs_list,
+					struct at_xdmac_desc, desc_node);
+		list_del(&desc->desc_node);
+		desc->active_xfer = false;
+	}
+
+	return desc;
+}
+
+static bool at_xdmac_filter(struct dma_chan *chan, void *slave)
+{
+	return chan->device->dev == slave;
+}
+
+static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
+				       struct of_dma *of_dma)
+{
+	struct at_xdmac *atxdmac = of_dma->of_dma_data;
+	struct at_xdmac_chan *atchan;
+	struct dma_chan *chan;
+	struct device *dev = atxdmac->dma.dev;
+	dma_cap_mask_t mask;
+
+	if (dma_spec->args_count != 2) {
+		dev_err(dev, "dma phandler args: bad number of args\n");
+		return NULL;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	chan = dma_request_channel(mask, at_xdmac_filter, dev);
+	if (!chan) {
+		dev_err(dev, "can't get a dma channel\n");
+		return NULL;
+	}
+
+	atchan = to_at_xdmac_chan(chan);
+	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
+	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
+	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[1]);
+	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
+		atchan->memif, atchan->perif, atchan->perid);
+
+	return chan;
+}
+
+static int at_xdmac_set_slave_config(struct dma_chan *chan,
+				     struct dma_slave_config *sconfig)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	u8 dwidth;
+	int csize;
+
+	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
+		AT91_XDMAC_DT_PERID(atchan->perid)
+		| AT_XDMAC_CC_DAM_INCREMENTED_AM
+		| AT_XDMAC_CC_SAM_FIXED_AM
+		| AT_XDMAC_CC_DIF(atchan->memif)
+		| AT_XDMAC_CC_SIF(atchan->perif)
+		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+		| AT_XDMAC_CC_DSYNC_PER2MEM
+		| AT_XDMAC_CC_MBSIZE_SIXTEEN
+		| AT_XDMAC_CC_TYPE_PER_TRAN;
+	csize = at_xdmac_csize(sconfig->src_maxburst);
+	if (csize < 0) {
+		dev_err(chan2dev(chan), "invalid src maxburst value\n");
+		return -EINVAL;
+	}
+	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+	dwidth = ffs(sconfig->src_addr_width) - 1;
+	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+
+	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
+		AT91_XDMAC_DT_PERID(atchan->perid)
+		| AT_XDMAC_CC_DAM_FIXED_AM
+		| AT_XDMAC_CC_SAM_INCREMENTED_AM
+		| AT_XDMAC_CC_DIF(atchan->perif)
+		| AT_XDMAC_CC_SIF(atchan->memif)
+		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+		| AT_XDMAC_CC_DSYNC_MEM2PER
+		| AT_XDMAC_CC_MBSIZE_SIXTEEN
+		| AT_XDMAC_CC_TYPE_PER_TRAN;
+	csize = at_xdmac_csize(sconfig->dst_maxburst);
+	if (csize < 0) {
+		dev_err(chan2dev(chan), "invalid dst maxburst value\n");
+		return -EINVAL;
+	}
+	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+	dwidth = ffs(sconfig->dst_addr_width) - 1;
+	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+	/* Src and dst addr are needed to configure the link list descriptor. */
+	atchan->per_src_addr = sconfig->src_addr;
+	atchan->per_dst_addr = sconfig->dst_addr;
+
+	dev_dbg(chan2dev(chan),
+		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
+		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
+		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
+		atchan->per_src_addr, atchan->per_dst_addr);
+
+	return 0;
+}
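A peripheral driver hands these parameters to the channel through the standard
dmaengine control hook of this kernel generation, i.e. the device_control()
callback with DMA_SLAVE_CONFIG that at_xdmac_control() dispatches further
below. A hedged sketch (the FIFO address and widths are placeholders):

struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= PERIPH_FIFO_PHYS,	/* placeholder address */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 16,
};

chan->device->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);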
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		       unsigned int sg_len, enum dma_transfer_direction direction,
+		       unsigned long flags, void *context)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc *first = NULL, *prev = NULL;
+	struct scatterlist *sg;
+	int i;
+	unsigned int xfer_size = 0;
+
+	if (!sgl)
+		return NULL;
+
+	if (!is_slave_direction(direction)) {
+		dev_err(chan2dev(chan), "invalid DMA direction\n");
+		return NULL;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
+		__func__, sg_len,
+		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
+		flags);
+
+	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
+	spin_lock(&atchan->lock);
+
+	/* Prepare descriptors. */
+	for_each_sg(sgl, sg, sg_len, i) {
+		struct at_xdmac_desc *desc = NULL;
+		u32 len, mem, dwidth, fixed_dwidth;
+
+		len = sg_dma_len(sg);
+		mem = sg_dma_address(sg);
+		if (unlikely(!len)) {
+			dev_err(chan2dev(chan), "sg data length is zero\n");
+			spin_unlock(&atchan->lock);
+			return NULL;
+		}
+		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
+			__func__, i, len, mem);
+
+		desc = at_xdmac_get_desc(atchan);
+		if (!desc) {
+			dev_err(chan2dev(chan), "can't get descriptor\n");
+			if (first)
+				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+			spin_unlock(&atchan->lock);
+			return NULL;
+		}
+
+		/* Linked list descriptor setup. */
+		if (direction == DMA_DEV_TO_MEM) {
+			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_da = mem;
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+		} else {
+			desc->lld.mbr_sa = mem;
+			desc->lld.mbr_da = atchan->per_dst_addr;
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+		}
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+			       : AT_XDMAC_CC_DWIDTH_BYTE;
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
+			| AT_XDMAC_MBR_UBC_NDEN		/* next descriptor dst parameter update */
+			| AT_XDMAC_MBR_UBC_NSEN		/* next descriptor src parameter update */
+			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)	/* descriptor fetch */
+			| (len >> fixed_dwidth);	/* microblock length */
+		dev_dbg(chan2dev(chan),
+			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+		/* Chain lld. */
+		if (prev) {
+			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+			dev_dbg(chan2dev(chan),
+				"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+				__func__, prev, &prev->lld.mbr_nda);
+		}
+
+		prev = desc;
+		if (!first)
+			first = desc;
+
+		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+			__func__, desc, first);
+		list_add_tail(&desc->desc_node, &first->descs_list);
+		xfer_size += len;
+	}
+
+	spin_unlock(&atchan->lock);
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = xfer_size;
+	first->direction = direction;
+
+	return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+			 size_t buf_len, size_t period_len,
+			 enum dma_transfer_direction direction,
+			 unsigned long flags, void *context)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc *first = NULL, *prev = NULL;
+	unsigned int periods = buf_len / period_len;
+	unsigned long lock_flags;
+	int i;
+
+	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
+		__func__, &buf_addr, buf_len, period_len,
+		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
+
+	if (!is_slave_direction(direction)) {
+		dev_err(chan2dev(chan), "invalid DMA direction\n");
+		return NULL;
+	}
+
+	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
+		dev_err(chan2dev(chan), "channel currently used\n");
+		return NULL;
+	}
+
+	for (i = 0; i < periods; i++) {
+		struct at_xdmac_desc *desc = NULL;
+
+		spin_lock_irqsave(&atchan->lock, lock_flags);
+		desc = at_xdmac_get_desc(atchan);
+		if (!desc) {
+			dev_err(chan2dev(chan), "can't get descriptor\n");
+			if (first)
+				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+			spin_unlock_irqrestore(&atchan->lock, lock_flags);
+			return NULL;
+		}
+		spin_unlock_irqrestore(&atchan->lock, lock_flags);
+		dev_dbg(chan2dev(chan),
+			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
+			__func__, desc, &desc->tx_dma_desc.phys);
+
+		if (direction == DMA_DEV_TO_MEM) {
+			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_da = buf_addr + i * period_len;
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+		} else {
+			desc->lld.mbr_sa = buf_addr + i * period_len;
+			desc->lld.mbr_da = atchan->per_dst_addr;
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+		}
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
+			| AT_XDMAC_MBR_UBC_NDEN
+			| AT_XDMAC_MBR_UBC_NSEN
+			| AT_XDMAC_MBR_UBC_NDE
+			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+
+		dev_dbg(chan2dev(chan),
+			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+		/* Chain lld. */
+		if (prev) {
+			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+			dev_dbg(chan2dev(chan),
+				"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+				__func__, prev, &prev->lld.mbr_nda);
+		}
+
+		prev = desc;
+		if (!first)
+			first = desc;
+
+		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+			__func__, desc, first);
+		list_add_tail(&desc->desc_node, &first->descs_list);
+	}
+
+	prev->lld.mbr_nda = first->tx_dma_desc.phys;
+	dev_dbg(chan2dev(chan),
+		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+		__func__, prev, &prev->lld.mbr_nda);
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = buf_len;
+	first->direction = direction;
+
+	return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+			 size_t len, unsigned long flags)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc *first = NULL, *prev = NULL;
+	size_t remaining_size = len, xfer_size = 0, ublen;
+	dma_addr_t src_addr = src, dst_addr = dest;
+	u32 dwidth;
+	unsigned long lock_flags;
+	/*
+	 * WARNING: We don't know the direction; this means we can't
+	 * dynamically set the source and dest interfaces, so we have to use
+	 * the same one for both. Only interface 0 allows EBI access.
+	 * Hopefully we can access DDR through both ports (at least on
+	 * SAMA5D4x), so we can use the same interface for source and dest,
+	 * which works around not knowing the direction.
+	 */
+	u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+		      | AT_XDMAC_CC_SAM_INCREMENTED_AM
+		      | AT_XDMAC_CC_DIF(0)
+		      | AT_XDMAC_CC_SIF(0)
+		      | AT_XDMAC_CC_MBSIZE_SIXTEEN
+		      | AT_XDMAC_CC_TYPE_MEM_TRAN;
+
+	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
+		__func__, &src, &dest, len, flags);
+
+	if (unlikely(!len))
+		return NULL;
+
+	/*
+	 * Check address alignment to select the greatest data width we can
+	 * use. Some XDMAC implementations don't provide dword transfers; in
+	 * that case selecting dword behaves the same as selecting word
+	 * transfers.
+	 */
+	if (!((src_addr | dst_addr) & 7)) {
+		dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+	} else if (!((src_addr | dst_addr) & 3)) {
+		dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+	} else if (!((src_addr | dst_addr) & 1)) {
+		dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+	} else {
+		dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+	}
+
+	/* Prepare descriptors. */
+	while (remaining_size) {
+		struct at_xdmac_desc *desc = NULL;
+
+		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
+
+		spin_lock_irqsave(&atchan->lock, lock_flags);
+		desc = at_xdmac_get_desc(atchan);
+		spin_unlock_irqrestore(&atchan->lock, lock_flags);
+		if (!desc) {
+			dev_err(chan2dev(chan), "can't get descriptor\n");
+			if (first)
+				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+			return NULL;
+		}
+
+		/* Update src and dest addresses. */
+		src_addr += xfer_size;
+		dst_addr += xfer_size;
+
+		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
+			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
+		else
+			xfer_size = remaining_size;
+
+		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
+
+		/* Check remaining length and change data width if needed. */
+		if (!((src_addr | dst_addr | xfer_size) & 7)) {
+			dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+			dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+		} else if (!((src_addr | dst_addr | xfer_size) & 3)) {
+			dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+			dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+		} else if (!((src_addr | dst_addr | xfer_size) & 1)) {
+			dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+			dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+		} else if ((src_addr | dst_addr | xfer_size) & 1) {
+			dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+			dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+		}
+		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+		ublen = xfer_size >> dwidth;
+		remaining_size -= xfer_size;
+
+		desc->lld.mbr_sa = src_addr;
+		desc->lld.mbr_da = dst_addr;
+		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
+			| AT_XDMAC_MBR_UBC_NDEN
+			| AT_XDMAC_MBR_UBC_NSEN
+			| (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
+			| ublen;
+		desc->lld.mbr_cfg = chan_cc;
+
+		dev_dbg(chan2dev(chan),
+			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
+
+		/* Chain lld. */
+		if (prev) {
+			prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+			dev_dbg(chan2dev(chan),
+				"%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
+				__func__, prev, prev->lld.mbr_nda);
+		}
+
+		prev = desc;
+		if (!first)
+			first = desc;
+
+		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+			__func__, desc, first);
+		list_add_tail(&desc->desc_node, &first->descs_list);
+	}
+
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
+static enum dma_status
+at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+		   struct dma_tx_state *txstate)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	struct at_xdmac_desc *desc, *_desc;
+	struct list_head *descs_list;
+	unsigned long flags;
+	enum dma_status ret;
+	int residue;
+	u32 cur_nda, mask, value;
+	u8 dwidth = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
+
+	if (!txstate)
+		return ret;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+
+	/*
+	 * If the transfer has not been started yet, there is no need to
+	 * compute the residue; it is just the transfer length.
+	 */
+	if (!desc->active_xfer) {
+		dma_set_residue(txstate, desc->xfer_size);
+		spin_unlock_irqrestore(&atchan->lock, flags);
+		return ret;
+	}
+
+	residue = desc->xfer_size;
+
+	/*
+	 * Flush FIFO: only relevant when the transfer is source peripheral
+	 * synchronized.
+	 */
+	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
+	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
+	if ((desc->lld.mbr_cfg & mask) == value) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+			cpu_relax();
+	}
+
+	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+	/*
+	 * Subtract the size of all microblocks already transferred, including
+	 * the current one, then add back what is left of the current
+	 * microblock (read from CUBC).
+	 */
+	descs_list = &desc->descs_list;
+	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
+		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+			break;
+	}
+	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
+	dma_set_residue(txstate, residue);
+
+	dev_dbg(chan2dev(chan),
+		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
+		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+
+	return ret;
+}
+
+/* Call must be protected by lock. */
+static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
+				 struct at_xdmac_desc *desc)
+{
+	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+
+	/*
+	 * Remove the transfer from the transfer list then move the transfer
+	 * descriptors into the free descriptors list.
+	 */
+	list_del(&desc->xfer_node);
+	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
+}
+
+static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+
+	/*
+	 * If the channel is enabled, do nothing; advance_work will be
+	 * triggered by the interrupt.
+	 */
+	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
+		desc = list_first_entry(&atchan->xfers_list,
+					struct at_xdmac_desc,
+					xfer_node);
+		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+		if (!desc->active_xfer)
+			at_xdmac_start_xfer(atchan, desc);
+	}
+
+	spin_unlock_irqrestore(&atchan->lock, flags);
+}
+
+static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac_desc *desc;
+	struct dma_async_tx_descriptor *txd;
+
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+	txd = &desc->tx_dma_desc;
+
+	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+		txd->callback(txd->callback_param);
+}
+
+static void at_xdmac_tasklet(unsigned long data)
+{
+	struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
+	struct at_xdmac_desc *desc;
+	u32 error_mask;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+		__func__, atchan->status);
+
+	error_mask = AT_XDMAC_CIS_RBEIS
+		     | AT_XDMAC_CIS_WBEIS
+		     | AT_XDMAC_CIS_ROIS;
+
+	if (at_xdmac_chan_is_cyclic(atchan)) {
+		at_xdmac_handle_cyclic(atchan);
+	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
+		   || (atchan->status & error_mask)) {
+		struct dma_async_tx_descriptor *txd;
+
+		if (atchan->status & AT_XDMAC_CIS_RBEIS)
+			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+		if (atchan->status & AT_XDMAC_CIS_WBEIS)
+			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+		if (atchan->status & AT_XDMAC_CIS_ROIS)
+			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+		spin_lock_irqsave(&atchan->lock, flags);
+		desc = list_first_entry(&atchan->xfers_list,
+					struct at_xdmac_desc,
+					xfer_node);
+		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+		BUG_ON(!desc->active_xfer);
+
+		txd = &desc->tx_dma_desc;
+
+		at_xdmac_remove_xfer(atchan, desc);
+		spin_unlock_irqrestore(&atchan->lock, flags);
+
+		if (!at_xdmac_chan_is_cyclic(atchan)) {
+			dma_cookie_complete(txd);
+			if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+				txd->callback(txd->callback_param);
+		}
+
+		dma_run_dependencies(txd);
+
+		at_xdmac_advance_work(atchan);
+	}
+}
+
+static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+{
+	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
+	struct at_xdmac_chan *atchan;
+	u32 imr, status, pending;
+	u32 chan_imr, chan_status;
+	int i, ret = IRQ_NONE;
+
+	do {
+		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
+		pending = status & imr;
+
+		dev_vdbg(atxdmac->dma.dev,
+			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
+			 __func__, status, imr, pending);
+
+		if (!pending)
+			break;
+
+		/* We have to find which channel generated the interrupt. */
+		for (i = 0; i < atxdmac->dma.chancnt; i++) {
+			if (!((1 << i) & pending))
+				continue;
+
+			atchan = &atxdmac->chan[i];
+			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+			atchan->status = chan_status & chan_imr;
+			dev_vdbg(atxdmac->dma.dev,
+				 "%s: chan%d: imr=0x%x, status=0x%x\n",
+				 __func__, i, chan_imr, chan_status);
+			dev_vdbg(chan2dev(&atchan->chan),
+				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+				 __func__,
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+
+			tasklet_schedule(&atchan->tasklet);
+			ret = IRQ_HANDLED;
+		}
+
+	} while (pending);
+
+	return ret;
+}
+
+static void at_xdmac_issue_pending(struct dma_chan *chan)
+{
+	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
+
+	if (!at_xdmac_chan_is_cyclic(atchan))
+		at_xdmac_advance_work(atchan);
+
+	return;
+}
+static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
||
+ unsigned long arg)
|
||
+{
|
||
+ struct at_xdmac_desc *desc, *_desc;
|
||
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
|
||
+ unsigned long flags;
|
||
+ int ret = 0;
|
||
+
|
||
+ dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
|
||
+
|
||
+ spin_lock_irqsave(&atchan->lock, flags);
|
||
+
|
||
+ switch (cmd) {
|
||
+ case DMA_PAUSE:
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
|
||
+ set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
|
||
+ break;
|
||
+ case DMA_RESUME:
|
||
+ if (!at_xdmac_chan_is_paused(atchan))
|
||
+ break;
|
||
+
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
|
||
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
|
||
+ break;
|
||
+ case DMA_TERMINATE_ALL:
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
|
||
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
|
||
+ cpu_relax();
|
||
+
|
||
+ /* Cancel all pending transfers. */
|
||
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
|
||
+ at_xdmac_remove_xfer(atchan, desc);
|
||
+
|
||
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
|
||
+ break;
|
||
+ case DMA_SLAVE_CONFIG:
|
||
+ ret = at_xdmac_set_slave_config(chan,
|
||
+ (struct dma_slave_config *)arg);
|
||
+ break;
|
||
+ default:
|
||
+ dev_err(chan2dev(chan),
|
||
+ "unmanaged or unknown dma control cmd: %d\n", cmd);
|
||
+ ret = -ENXIO;
|
||
+ }
|
||
+
|
||
+ spin_unlock_irqrestore(&atchan->lock, flags);
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||
+ struct at_xdmac_desc *desc;
|
||
+ unsigned long flags;
|
||
+ int i;
|
||
+
|
||
+ spin_lock_irqsave(&atchan->lock, flags);
|
||
+
|
||
+ if (at_xdmac_chan_is_enabled(atchan)) {
|
||
+ dev_err(chan2dev(chan),
|
||
+ "can't allocate channel resources (channel enabled)\n");
|
||
+ i = -EIO;
|
||
+ goto spin_unlock;
|
||
+ }
|
||
+
|
||
+ if (!list_empty(&atchan->free_descs_list)) {
|
||
+ dev_err(chan2dev(chan),
|
||
+ "can't allocate channel resources (channel not free from a previous use)\n");
|
||
+ i = -EIO;
|
||
+ goto spin_unlock;
|
||
+ }
|
||
+
|
||
+ for (i = 0; i < init_nr_desc_per_channel; i++) {
|
||
+ desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
|
||
+ if (!desc) {
|
||
+ dev_warn(chan2dev(chan),
|
||
+ "only %d descriptors have been allocated\n", i);
|
||
+ break;
|
||
+ }
|
||
+ list_add_tail(&desc->desc_node, &atchan->free_descs_list);
|
||
+ }
|
||
+
|
||
+ dma_cookie_init(chan);
|
||
+
|
||
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
|
||
+
|
||
+spin_unlock:
|
||
+ spin_unlock_irqrestore(&atchan->lock, flags);
|
||
+ return i;
|
||
+}
|
||
+
|
||
+static void at_xdmac_free_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||
+ struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
|
||
+ struct at_xdmac_desc *desc, *_desc;
|
||
+
|
||
+ list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
|
||
+ dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
|
||
+ list_del(&desc->desc_node);
|
||
+ dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
|
||
+ }
|
||
+
|
||
+ return;
|
||
+}
|
||
+
|
||
+#ifdef CONFIG_PM
|
||
+static int atmel_xdmac_prepare(struct device *dev)
|
||
+{
|
||
+ struct platform_device *pdev = to_platform_device(dev);
|
||
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
|
||
+ struct dma_chan *chan, *_chan;
|
||
+
|
||
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
|
||
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||
+
|
||
+ /* Wait for transfer completion, except in cyclic case. */
|
||
+ if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
|
||
+ return -EAGAIN;
|
||
+ }
|
||
+ return 0;
|
||
+}
|
||
+#else
|
||
+# define atmel_xdmac_prepare NULL
|
||
+#endif
|
||
+
|
||
+#ifdef CONFIG_PM_SLEEP
|
||
+static int atmel_xdmac_suspend(struct device *dev)
|
||
+{
|
||
+ struct platform_device *pdev = to_platform_device(dev);
|
||
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
|
||
+ struct dma_chan *chan, *_chan;
|
||
+
|
||
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
|
||
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||
+
|
||
+ atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
|
||
+ if (at_xdmac_chan_is_cyclic(atchan)) {
|
||
+ if (!at_xdmac_chan_is_paused(atchan))
|
||
+ at_xdmac_control(chan, DMA_PAUSE, 0);
|
||
+ atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
|
||
+ atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
|
||
+ atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
|
||
+ }
|
||
+ }
|
||
+ atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
|
||
+
|
||
+ at_xdmac_off(atxdmac);
|
||
+ clk_disable_unprepare(atxdmac->clk);
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int atmel_xdmac_resume(struct device *dev)
|
||
+{
|
||
+ struct platform_device *pdev = to_platform_device(dev);
|
||
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
|
||
+ struct at_xdmac_chan *atchan;
|
||
+ struct dma_chan *chan, *_chan;
|
||
+ int i;
|
||
+
|
||
+ clk_prepare_enable(atxdmac->clk);
|
||
+
|
||
+ /* Clear pending interrupts. */
|
||
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
|
||
+ atchan = &atxdmac->chan[i];
|
||
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
|
||
+ cpu_relax();
|
||
+ }
|
||
+
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
|
||
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
|
||
+ atchan = to_at_xdmac_chan(chan);
|
||
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
|
||
+ if (at_xdmac_chan_is_cyclic(atchan)) {
|
||
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
|
||
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
|
||
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
|
||
+ wmb();
|
||
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
|
||
+ }
|
||
+ }
|
||
+ return 0;
|
||
+}
|
||
+#endif /* CONFIG_PM_SLEEP */
|
||
+
|
||
+static int at_xdmac_probe(struct platform_device *pdev)
|
||
+{
|
||
+ struct resource *res;
|
||
+ struct at_xdmac *atxdmac;
|
||
+ int irq, size, nr_channels, i, ret;
|
||
+ void __iomem *base;
|
||
+ u32 reg;
|
||
+
|
||
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||
+ if (!res)
|
||
+ return -EINVAL;
|
||
+
|
||
+ irq = platform_get_irq(pdev, 0);
|
||
+ if (irq < 0)
|
||
+ return irq;
|
||
+
|
||
+ base = devm_ioremap_resource(&pdev->dev, res);
|
||
+ if (IS_ERR(base))
|
||
+ return PTR_ERR(base);
|
||
+
|
||
+ /*
|
||
+ * Read number of xdmac channels, read helper function can't be used
|
||
+ * since atxdmac is not yet allocated and we need to know the number
|
||
+ * of channels to do the allocation.
|
||
+ */
|
||
+ reg = readl_relaxed(base + AT_XDMAC_GTYPE);
|
||
+ nr_channels = AT_XDMAC_NB_CH(reg);
|
||
+ if (nr_channels > AT_XDMAC_MAX_CHAN) {
|
||
+ dev_err(&pdev->dev, "invalid number of channels (%u)\n",
|
||
+ nr_channels);
|
||
+ return -EINVAL;
|
||
+ }
|
||
+
|
||
+ size = sizeof(*atxdmac);
|
||
+ size += nr_channels * sizeof(struct at_xdmac_chan);
|
||
+ atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
|
||
+ if (!atxdmac) {
|
||
+ dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
|
||
+ return -ENOMEM;
|
||
+ }
|
||
+
|
||
+ atxdmac->regs = base;
|
||
+ atxdmac->irq = irq;
|
||
+
|
||
+ atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
|
||
+ if (IS_ERR(atxdmac->clk)) {
|
||
+ dev_err(&pdev->dev, "can't get dma_clk\n");
|
||
+ return PTR_ERR(atxdmac->clk);
|
||
+ }
|
||
+
|
||
+ /* Do not use dev res to prevent races with tasklet */
|
||
+ ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev, "can't request irq\n");
|
||
+ return ret;
|
||
+ }
|
||
+
|
||
+ ret = clk_prepare_enable(atxdmac->clk);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev, "can't prepare or enable clock\n");
|
||
+ goto err_free_irq;
|
||
+ }
|
||
+
|
||
+ atxdmac->at_xdmac_desc_pool =
|
||
+ dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
|
||
+ sizeof(struct at_xdmac_desc), 4, 0);
|
||
+ if (!atxdmac->at_xdmac_desc_pool) {
|
||
+ dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
|
||
+ ret = -ENOMEM;
|
||
+ goto err_clk_disable;
|
||
+ }
|
||
+
|
||
+ dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
|
||
+ dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
|
||
+ dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
|
||
+ /*
|
||
+ * Without DMA_PRIVATE the driver is not able to allocate more than
|
||
+ * one channel, second allocation fails in private_candidate.
|
||
+ */
|
||
+ dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
|
||
+ atxdmac->dma.dev = &pdev->dev;
|
||
+ atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
|
||
+ atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
|
||
+ atxdmac->dma.device_tx_status = at_xdmac_tx_status;
|
||
+ atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
|
||
+ atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
|
||
+ atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
|
||
+ atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
|
||
+ atxdmac->dma.device_control = at_xdmac_control;
|
||
+ atxdmac->dma.chancnt = nr_channels;
|
||
+
|
||
+ /* Disable all chans and interrupts. */
|
||
+ at_xdmac_off(atxdmac);
|
||
+
|
||
+ /* Init channels. */
|
||
+ INIT_LIST_HEAD(&atxdmac->dma.channels);
|
||
+ for (i = 0; i < nr_channels; i++) {
|
||
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
|
||
+
|
||
+ atchan->chan.device = &atxdmac->dma;
|
||
+ list_add_tail(&atchan->chan.device_node,
|
||
+ &atxdmac->dma.channels);
|
||
+
|
||
+ atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
|
||
+ atchan->mask = 1 << i;
|
||
+
|
||
+ spin_lock_init(&atchan->lock);
|
||
+ INIT_LIST_HEAD(&atchan->xfers_list);
|
||
+ INIT_LIST_HEAD(&atchan->free_descs_list);
|
||
+ tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
|
||
+ (unsigned long)atchan);
|
||
+
|
||
+ /* Clear pending interrupts. */
|
||
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
|
||
+ cpu_relax();
|
||
+ }
|
||
+ platform_set_drvdata(pdev, atxdmac);
|
||
+
|
||
+ ret = dma_async_device_register(&atxdmac->dma);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev, "fail to register DMA engine device\n");
|
||
+ goto err_clk_disable;
|
||
+ }
|
||
+
|
||
+ ret = of_dma_controller_register(pdev->dev.of_node,
|
||
+ at_xdmac_xlate, atxdmac);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev, "could not register of dma controller\n");
|
||
+ goto err_dma_unregister;
|
||
+ }
|
||
+
|
||
+ dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
|
||
+ nr_channels, atxdmac->regs);
|
||
+
|
||
+ return 0;
|
||
+
|
||
+err_dma_unregister:
|
||
+ dma_async_device_unregister(&atxdmac->dma);
|
||
+err_clk_disable:
|
||
+ clk_disable_unprepare(atxdmac->clk);
|
||
+err_free_irq:
|
||
+ free_irq(atxdmac->irq, atxdmac->dma.dev);
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static int at_xdmac_remove(struct platform_device *pdev)
|
||
+{
|
||
+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
|
||
+ int i;
|
||
+
|
||
+ at_xdmac_off(atxdmac);
|
||
+ of_dma_controller_free(pdev->dev.of_node);
|
||
+ dma_async_device_unregister(&atxdmac->dma);
|
||
+ clk_disable_unprepare(atxdmac->clk);
|
||
+
|
||
+ synchronize_irq(atxdmac->irq);
|
||
+
|
||
+ free_irq(atxdmac->irq, atxdmac->dma.dev);
|
||
+
|
||
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
|
||
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
|
||
+
|
||
+ tasklet_kill(&atchan->tasklet);
|
||
+ at_xdmac_free_chan_resources(&atchan->chan);
|
||
+ }
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
|
||
+ .prepare = atmel_xdmac_prepare,
|
||
+ .suspend_late = atmel_xdmac_suspend,
|
||
+ .resume_early = atmel_xdmac_resume,
|
||
+ .freeze_late = atmel_xdmac_suspend,
|
||
+ .thaw_early = atmel_xdmac_resume,
|
||
+ .poweroff_late = atmel_xdmac_suspend,
|
||
+ .restore_early = atmel_xdmac_resume,
|
||
+};
|
||
+
|
||
+static const struct of_device_id atmel_xdmac_dt_ids[] = {
|
||
+ {
|
||
+ .compatible = "atmel,sama5d4-dma",
|
||
+ }, {
|
||
+ /* sentinel */
|
||
+ }
|
||
+};
|
||
+MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
|
||
+
|
||
+static struct platform_driver at_xdmac_driver = {
|
||
+ .probe = at_xdmac_probe,
|
||
+ .remove = at_xdmac_remove,
|
||
+ .driver = {
|
||
+ .name = "at_xdmac",
|
||
+ .owner = THIS_MODULE,
|
||
+ .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
|
||
+ .pm = &atmel_xdmac_dev_pm_ops,
|
||
+ }
|
||
+};
|
||
+
|
||
+static int __init at_xdmac_init(void)
|
||
+{
|
||
+ return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
|
||
+}
|
||
+subsys_initcall(at_xdmac_init);
|
||
+
|
||
+MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
|
||
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
|
||
+MODULE_LICENSE("GPL");
|
||
diff -urN linux-3.0.101/drivers/dma/bestcomm/ata.c linux-3.0.101.xm510/drivers/dma/bestcomm/ata.c
--- linux-3.0.101/drivers/dma/bestcomm/ata.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/ata.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,157 @@
+/*
+ * Bestcomm ATA task driver
+ *
+ *
+ * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
+ * 2003-2004 (c) MontaVista, Software, Inc.
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 Freescale - John Rigby
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* ata task image */
+extern u32 bcom_ata_task[];
+
+/* ata task vars that need to be set before enabling the task */
+struct bcom_ata_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* ata task incs that need to be set before enabling the task */
+struct bcom_ata_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_src;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_ata_init(int queue_len, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_ata_var *var;
+ struct bcom_ata_inc *inc;
+
+ /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
+ bcom_disable_prefetch();
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ bcom_ata_reset_bd(tsk);
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = maxbufsize;
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_init);
+
+void bcom_ata_rx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = 0;
+ inc->incr_dst = sizeof(u32);
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
+
+void bcom_ata_tx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_dst = 0;
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
+
+void bcom_ata_reset_bd(struct bcom_task *tsk)
+{
+ struct bcom_ata_var *var;
+
+ /* Reset all BD */
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ var->bd_start = var->bd_base;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
+
+void bcom_ata_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the ATA tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_release);
+
+
+MODULE_DESCRIPTION("BestComm ATA task driver");
+MODULE_AUTHOR("John Rigby");
+MODULE_LICENSE("GPL v2");
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bcom_ata_task.c linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_ata_task.c
--- linux-3.0.101/drivers/dma/bestcomm/bcom_ata_task.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_ata_task.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,67 @@
+/*
+ * Bestcomm ATA task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_ata_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0e060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
+ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
+ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
+ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
+ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
+ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
+ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
+ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
+ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
+ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa000000c,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+};
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bcom_fec_rx_task.c linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_fec_rx_task.c
--- linux-3.0.101/drivers/dma/bestcomm/bcom_fec_rx_task.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_fec_rx_task.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,78 @@
+/*
+ * Bestcomm FEC RX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:38 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x18060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
+ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
+ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
+ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
+ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
+ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
+ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
+ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
+ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
+ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
+ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
+ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
+ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000008,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bcom_fec_tx_task.c linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_fec_tx_task.c
--- linux-3.0.101/drivers/dma/bestcomm/bcom_fec_tx_task.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_fec_tx_task.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,91 @@
+/*
+ * Bestcomm FEC TX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:29 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x2407070d,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
+ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
+ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
+ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
+ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
+ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
+ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
+ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
+ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
+ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
+ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
+ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
+ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
+ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
+ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
+ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
+ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
+ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
+ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[13]-VAR[19] */
+ 0x0c000000,
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000004,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
--- linux-3.0.101/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,63 @@
+/*
+ * Bestcomm GenBD RX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0d020409,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
+ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[10] */
+ 0x40000000,
+ 0x7fff7fff,
+
+ /* INC[0]-INC[3] */
+ 0x40000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+};
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
--- linux-3.0.101/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,69 @@
+/*
+ * Bestcomm GenBD TX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0f040609,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
+ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
+ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
+ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[12] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x40000004,
+
+ /* INC[0]-INC[5] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x4000ffff,
+};
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/bestcomm.c linux-3.0.101.xm510/drivers/dma/bestcomm/bestcomm.c
--- linux-3.0.101/drivers/dma/bestcomm/bestcomm.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/bestcomm.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,531 @@
+/*
+ * Driver for MPC52xx processor BestComm peripheral controller
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include "linux/fsl/bestcomm/bestcomm.h"
+
+#define DRIVER_NAME "bestcomm-core"
+
+/* MPC5200 device tree match tables */
+static struct of_device_id mpc52xx_sram_ids[] = {
+ { .compatible = "fsl,mpc5200-sram", },
+ { .compatible = "mpc5200-sram", },
+ {}
+};
+
+
+struct bcom_engine *bcom_eng = NULL;
+EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
+
+/* ======================================================================== */
+/* Public and private API */
+/* ======================================================================== */
+
+/* Private API */
+
+struct bcom_task *
+bcom_task_alloc(int bd_count, int bd_size, int priv_size)
+{
+ int i, tasknum = -1;
+ struct bcom_task *tsk;
+
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
+ /* Get and reserve a task num */
+ spin_lock(&bcom_eng->lock);
+
+ for (i=0; i<BCOM_MAX_TASKS; i++)
+ if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
+ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
+ tasknum = i;
+ break;
+ }
+
+ spin_unlock(&bcom_eng->lock);
+
+ if (tasknum < 0)
+ return NULL;
+
+ /* Allocate our structure */
+ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
+ if (!tsk)
+ goto error;
+
+ tsk->tasknum = tasknum;
+ if (priv_size)
+ tsk->priv = (void*)tsk + sizeof(struct bcom_task);
+
+ /* Get IRQ of that task */
+ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
+ if (tsk->irq == NO_IRQ)
+ goto error;
+
+ /* Init the BDs, if needed */
+ if (bd_count) {
+ tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ if (!tsk->cookie)
+ goto error;
+
+ tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
+ if (!tsk->bd)
+ goto error;
+ memset(tsk->bd, 0x00, bd_count * bd_size);
+
+ tsk->num_bd = bd_count;
+ tsk->bd_size = bd_size;
+ }
+
+ return tsk;
+
+error:
+ if (tsk) {
+ if (tsk->irq != NO_IRQ)
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+ }
+
+ bcom_eng->tdt[tasknum].stop = 0;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bcom_task_alloc);
+
+void
+bcom_task_free(struct bcom_task *tsk)
+{
+ /* Stop the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Clear TDT */
+ bcom_eng->tdt[tsk->tasknum].start = 0;
+ bcom_eng->tdt[tsk->tasknum].stop = 0;
+
+ /* Free everything */
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_task_free);
+
+int
+bcom_load_image(int task, u32 *task_image)
+{
+ struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
+ struct bcom_tdt *tdt;
+ u32 *desc, *var, *inc;
+ u32 *desc_src, *var_src, *inc_src;
+
+ /* Safety checks */
+ if (hdr->magic != BCOM_TASK_MAGIC) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid microcode\n");
+ return -EINVAL;
+ }
+
+ if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid task %d\n", task);
+ return -EINVAL;
+ }
+
+ /* Initial load or reload */
+ tdt = &bcom_eng->tdt[task];
+
+ if (tdt->start) {
+ desc = bcom_task_desc(task);
+ if (hdr->desc_size != bcom_task_num_descs(task)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to reload wrong task image "
+ "(%d size %d/%d)!\n",
+ task,
+ hdr->desc_size,
+ bcom_task_num_descs(task));
+ return -EINVAL;
+ }
+ } else {
+ phys_addr_t start_pa;
+
+ desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
+ if (!desc)
+ return -ENOMEM;
+
+ tdt->start = start_pa;
+ tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
+ }
+
+ var = bcom_task_var(task);
+ inc = bcom_task_inc(task);
+
+ /* Clear & copy */
+ memset(var, 0x00, BCOM_VAR_SIZE);
+ memset(inc, 0x00, BCOM_INC_SIZE);
+
+ desc_src = (u32 *)(hdr + 1);
+ var_src = desc_src + hdr->desc_size;
+ inc_src = var_src + hdr->var_size;
+
+ memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_load_image);
+
+void
+bcom_set_initiator(int task, int initiator)
+{
+ int i;
+ int num_descs;
+ u32 *desc;
+ int next_drd_has_initiator;
+
+ bcom_set_tcr_initiator(task, initiator);
+
+ /* Just setting tcr is apparently not enough due to some problem */
+ /* with it. So we just go thru all the microcode and replace in */
+ /* the DRD directly */
+
+ desc = bcom_task_desc(task);
+ next_drd_has_initiator = 1;
+ num_descs = bcom_task_num_descs(task);
+
+ for (i=0; i<num_descs; i++, desc++) {
+ if (!bcom_desc_is_drd(*desc))
+ continue;
+ if (next_drd_has_initiator)
+ if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
+ bcom_set_desc_initiator(desc, initiator);
+ next_drd_has_initiator = !bcom_drd_is_extended(*desc);
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_set_initiator);
+
+
+/* Public API */
+
+void
+bcom_enable(struct bcom_task *tsk)
+{
+ bcom_enable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_enable);
+
+void
+bcom_disable(struct bcom_task *tsk)
+{
+ bcom_disable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_disable);
+
+
+/* ======================================================================== */
+/* Engine init/cleanup */
+/* ======================================================================== */
+
+/* Function Descriptor table */
+/* this will need to be updated if Freescale changes their task code FDT */
+static u32 fdt_ops[] = {
+ 0xa0045670, /* FDT[48] - load_acc() */
+ 0x80045670, /* FDT[49] - unload_acc() */
+ 0x21800000, /* FDT[50] - and() */
+ 0x21e00000, /* FDT[51] - or() */
+ 0x21500000, /* FDT[52] - xor() */
+ 0x21400000, /* FDT[53] - andn() */
+ 0x21500000, /* FDT[54] - not() */
+ 0x20400000, /* FDT[55] - add() */
+ 0x20500000, /* FDT[56] - sub() */
+ 0x20800000, /* FDT[57] - lsh() */
+ 0x20a00000, /* FDT[58] - rsh() */
+ 0xc0170000, /* FDT[59] - crc8() */
+ 0xc0145670, /* FDT[60] - crc16() */
+ 0xc0345670, /* FDT[61] - crc32() */
+ 0xa0076540, /* FDT[62] - endian32() */
+ 0xa0000760, /* FDT[63] - endian16() */
+};
+
+
+static int bcom_engine_init(void)
+{
+ int task;
+ phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
+ unsigned int tdt_size, ctx_size, var_size, fdt_size;
+
+ /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
+ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
+ ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
+ var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
+ fdt_size = BCOM_FDT_SIZE;
+
+ bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
+ bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
+ bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
+ bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
+
+ if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
+ printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
+
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+
+ return -ENOMEM;
+ }
+
+ memset(bcom_eng->tdt, 0x00, tdt_size);
+ memset(bcom_eng->ctx, 0x00, ctx_size);
+ memset(bcom_eng->var, 0x00, var_size);
+ memset(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+ memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+
+ bcom_eng->tdt[task].context = ctx_pa;
+ bcom_eng->tdt[task].var = var_pa;
+ bcom_eng->tdt[task].fdt = fdt_pa;
+
+ var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
+ ctx_pa += BCOM_CTX_SIZE;
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, tdt_pa);
+
+ /* Init 'always' initiator */
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
+
+ /* Disable COMM Bus Prefetch on the original 5200; it's broken */
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
+
+ /* Init lock */
+ spin_lock_init(&bcom_eng->lock);
+
+ return 0;
+}
+
+static void
+bcom_engine_cleanup(void)
+{
+ int task;
+
+ /* Stop all tasks */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, 0ul);
+
+ /* Release the SRAM zones */
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+}
+
+
+/* ======================================================================== */
+/* OF platform driver */
+/* ======================================================================== */
+
+static int mpc52xx_bcom_probe(struct platform_device *op)
+{
+ struct device_node *ofn_sram;
+ struct resource res_bcom;
+
+ int rv;
+
+ /* Inform user we're ok so far */
+ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
+
+ /* Get the bestcomm node */
+ of_node_get(op->dev.of_node);
+
+ /* Prepare SRAM */
+ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ if (!ofn_sram) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "No SRAM found in device tree\n");
+ rv = -ENODEV;
+ goto error_ofput;
+ }
+ rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
+ of_node_put(ofn_sram);
+
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error in SRAM init\n");
+ goto error_ofput;
+ }
+
+ /* Get a clean struct */
+ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
+ if (!bcom_eng) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't allocate state structure\n");
+ rv = -ENOMEM;
+ goto error_sramclean;
+ }
+
+ /* Save the node */
+ bcom_eng->ofnode = op->dev.of_node;
+
+ /* Get, reserve & map io */
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't get resource\n");
+ rv = -EINVAL;
+ goto error_sramclean;
+ }
+
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
+ DRIVER_NAME)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't request registers region\n");
+ rv = -EBUSY;
+ goto error_sramclean;
+ }
+
+ bcom_eng->regs_base = res_bcom.start;
+ bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
+ if (!bcom_eng->regs) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't map registers\n");
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Now, do the real init */
+ rv = bcom_engine_init();
+ if (rv)
+ goto error_unmap;
+
+ /* Done ! */
+ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
+ (long)bcom_eng->regs_base);
+
+ return 0;
+
+ /* Error path */
+error_unmap:
+ iounmap(bcom_eng->regs);
+error_release:
+ release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
+error_sramclean:
+ kfree(bcom_eng);
+ bcom_sram_cleanup();
+error_ofput:
+ of_node_put(op->dev.of_node);
+
+ printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
+
+ return rv;
+}
+
+
+static int mpc52xx_bcom_remove(struct platform_device *op)
+{
+ /* Clean up the engine */
+ bcom_engine_cleanup();
+
+ /* Cleanup SRAM */
+ bcom_sram_cleanup();
+
+ /* Release regs */
+ iounmap(bcom_eng->regs);
+ release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
+
+ /* Release the node */
+ of_node_put(bcom_eng->ofnode);
+
+ /* Release memory */
+ kfree(bcom_eng);
+ bcom_eng = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_bcom_of_match[] = {
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
+
+
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
+ .probe = mpc52xx_bcom_probe,
+ .remove = mpc52xx_bcom_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_bcom_init(void)
+{
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_bcom_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
+}
+
+/* If we're not a module, we must make sure everything is setup before */
+/* anyone tries to use us ... that's why we use subsys_initcall instead */
+/* of module_init. */
+subsys_initcall(mpc52xx_bcom_init);
+module_exit(mpc52xx_bcom_exit);
+
+MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/fec.c linux-3.0.101.xm510/drivers/dma/bestcomm/fec.c
|
||
--- linux-3.0.101/drivers/dma/bestcomm/fec.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/fec.c 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,270 @@
|
||
+/*
|
||
+ * Bestcomm FEC tasks driver
|
||
+ *
|
||
+ *
|
||
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
|
||
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
|
||
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
|
||
+ *
|
||
+ * This file is licensed under the terms of the GNU General Public License
|
||
+ * version 2. This program is licensed "as is" without any warranty of any
|
||
+ * kind, whether express or implied.
|
||
+ */
|
||
+
|
||
+#include <linux/kernel.h>
|
||
+#include <linux/module.h>
|
||
+#include <linux/types.h>
|
||
+#include <asm/io.h>
|
||
+
|
||
+#include <linux/fsl/bestcomm/bestcomm.h>
|
||
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc                                                       */
+/* ======================================================================== */
+
+/* fec tasks images */
+extern u32 bcom_fec_rx_task[];
+extern u32 bcom_fec_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_fec_rx_var {
+	u32 enable;		/* (u16*) address of task's control register */
+	u32 fifo;		/* (u32*) address of fec's fifo */
+	u32 bd_base;		/* (struct bcom_bd*) beginning of ring buffer */
+	u32 bd_last;		/* (struct bcom_bd*) end of ring buffer */
+	u32 bd_start;		/* (struct bcom_bd*) current bd */
+	u32 buffer_size;	/* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_fec_rx_inc {
+	u16 pad0;
+	s16 incr_bytes;
+	u16 pad1;
+	s16 incr_dst;
+	u16 pad2;
+	s16 incr_dst_ma;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_fec_tx_var {
+	u32 DRD;		/* (u32*) address of self-modified DRD */
+	u32 fifo;		/* (u32*) address of fec's fifo */
+	u32 enable;		/* (u16*) address of task's control register */
+	u32 bd_base;		/* (struct bcom_bd*) beginning of ring buffer */
+	u32 bd_last;		/* (struct bcom_bd*) end of ring buffer */
+	u32 bd_start;		/* (struct bcom_bd*) current bd */
+	u32 buffer_size;	/* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_fec_tx_inc {
+	u16 pad0;
+	s16 incr_bytes;
+	u16 pad1;
+	s16 incr_src;
+	u16 pad2;
+	s16 incr_src_ma;
+};
+
+/* private structure in the task */
+struct bcom_fec_priv {
+	phys_addr_t	fifo;
+	int		maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code                                                        */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
+{
+	struct bcom_task *tsk;
+	struct bcom_fec_priv *priv;
+
+	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+				sizeof(struct bcom_fec_priv));
+	if (!tsk)
+		return NULL;
+
+	tsk->flags = BCOM_FLAGS_NONE;
+
+	priv = tsk->priv;
+	priv->fifo = fifo;
+	priv->maxbufsize = maxbufsize;
+
+	if (bcom_fec_rx_reset(tsk)) {
+		bcom_task_free(tsk);
+		return NULL;
+	}
+
+	return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
+
+int
+bcom_fec_rx_reset(struct bcom_task *tsk)
+{
+	struct bcom_fec_priv *priv = tsk->priv;
+	struct bcom_fec_rx_var *var;
+	struct bcom_fec_rx_inc *inc;
+
+	/* Shutdown the task */
+	bcom_disable_task(tsk->tasknum);
+
+	/* Reset the microcode */
+	var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
+	inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+	if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
+		return -1;
+
+	var->enable	= bcom_eng->regs_base +
+				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+	var->fifo	= (u32) priv->fifo;
+	var->bd_base	= tsk->bd_pa;
+	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+	var->bd_start	= tsk->bd_pa;
+	var->buffer_size = priv->maxbufsize;
+
+	inc->incr_bytes	= -(s16)sizeof(u32);	/* These should be in the   */
+	inc->incr_dst	= sizeof(u32);		/* task image, but we stick */
+	inc->incr_dst_ma= sizeof(u8);		/* to the official ones     */
+
+	/* Reset the BDs */
+	tsk->index = 0;
+	tsk->outdex = 0;
+
+	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+	/* Configure some stuff */
+	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+
+	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
+
+void
+bcom_fec_rx_release(struct bcom_task *tsk)
+{
+	/* Nothing special for the FEC tasks */
+	bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
+
+
+
+	/* Return 2nd to last DRD */
+	/* This is an ugly hack, but at least it's only done
+	   once at initialization */
+static u32 *self_modified_drd(int tasknum)
+{
+	u32 *desc;
+	int num_descs;
+	int drd_count;
+	int i;
+
+	num_descs = bcom_task_num_descs(tasknum);
+	desc = bcom_task_desc(tasknum) + num_descs - 1;
+	drd_count = 0;
+	for (i=0; i<num_descs; i++, desc--)
+		if (bcom_desc_is_drd(*desc) && ++drd_count == 2)
+			break;
+	return desc;
+}
+
+struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
+{
+	struct bcom_task *tsk;
+	struct bcom_fec_priv *priv;
+
+	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+				sizeof(struct bcom_fec_priv));
+	if (!tsk)
+		return NULL;
+
+	tsk->flags = BCOM_FLAGS_ENABLE_TASK;
+
+	priv = tsk->priv;
+	priv->fifo = fifo;
+
+	if (bcom_fec_tx_reset(tsk)) {
+		bcom_task_free(tsk);
+		return NULL;
+	}
+
+	return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
+
+int
+bcom_fec_tx_reset(struct bcom_task *tsk)
+{
+	struct bcom_fec_priv *priv = tsk->priv;
+	struct bcom_fec_tx_var *var;
+	struct bcom_fec_tx_inc *inc;
+
+	/* Shutdown the task */
+	bcom_disable_task(tsk->tasknum);
+
+	/* Reset the microcode */
+	var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
+	inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+	if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
+		return -1;
+
+	var->enable	= bcom_eng->regs_base +
+				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+	var->fifo	= (u32) priv->fifo;
+	var->DRD	= bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
+	var->bd_base	= tsk->bd_pa;
+	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+	var->bd_start	= tsk->bd_pa;
+
+	inc->incr_bytes	= -(s16)sizeof(u32);	/* These should be in the   */
+	inc->incr_src	= sizeof(u32);		/* task image, but we stick */
+	inc->incr_src_ma= sizeof(u8);		/* to the official ones     */
+
+	/* Reset the BDs */
+	tsk->index = 0;
+	tsk->outdex = 0;
+
+	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+	/* Configure some stuff */
+	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
+
+	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
+
+void
+bcom_fec_tx_release(struct bcom_task *tsk)
+{
+	/* Nothing special for the FEC tasks */
+	bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
+
+
+MODULE_DESCRIPTION("BestComm FEC tasks driver");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
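For context, a MAC driver consumes these RX/TX tasks through the inline bcom_* queue API declared in bestcomm.h. The fragment below is a minimal sketch of the RX side only, not the in-tree fec_mpc52xx driver; FEC_RX_BUFFER_SIZE, fec_fifo_phys and dev stand in for driver-specific values:

	struct bcom_task *rx;
	struct bcom_fec_bd *bd;

	rx = bcom_fec_rx_init(32, fec_fifo_phys, FEC_RX_BUFFER_SIZE);
	if (!rx)
		return -ENOMEM;

	/* Pre-load every buffer descriptor with an empty receive buffer,
	 * then let the task run; completed buffers are later collected
	 * with bcom_retrieve_buffer() from the task's interrupt handler. */
	while (!bcom_queue_full(rx)) {
		struct sk_buff *skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);

		bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(rx);
		bd->status = FEC_RX_BUFFER_SIZE;
		bd->skb_pa = dma_map_single(dev, skb->data,
					    FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
		bcom_submit_next_buffer(rx, skb);
	}
	bcom_enable(rx);

The skb allocation error path is omitted for brevity.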
diff -urN linux-3.0.101/drivers/dma/bestcomm/gen_bd.c linux-3.0.101.xm510/drivers/dma/bestcomm/gen_bd.c
--- linux-3.0.101/drivers/dma/bestcomm/gen_bd.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/gen_bd.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,354 @@
+/*
+ * Driver for MPC52xx processor BestComm General Buffer Descriptor
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ *                    Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc                                                       */
+/* ======================================================================== */
+
+/* gen_bd tasks images */
+extern u32 bcom_gen_bd_rx_task[];
+extern u32 bcom_gen_bd_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_rx_var {
+	u32 enable;		/* (u16*) address of task's control register */
+	u32 fifo;		/* (u32*) address of gen_bd's fifo */
+	u32 bd_base;		/* (struct bcom_bd*) beginning of ring buffer */
+	u32 bd_last;		/* (struct bcom_bd*) end of ring buffer */
+	u32 bd_start;		/* (struct bcom_bd*) current bd */
+	u32 buffer_size;	/* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_rx_inc {
+	u16 pad0;
+	s16 incr_bytes;
+	u16 pad1;
+	s16 incr_dst;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_tx_var {
+	u32 fifo;		/* (u32*) address of gen_bd's fifo */
+	u32 enable;		/* (u16*) address of task's control register */
+	u32 bd_base;		/* (struct bcom_bd*) beginning of ring buffer */
+	u32 bd_last;		/* (struct bcom_bd*) end of ring buffer */
+	u32 bd_start;		/* (struct bcom_bd*) current bd */
+	u32 buffer_size;	/* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_tx_inc {
+	u16 pad0;
+	s16 incr_bytes;
+	u16 pad1;
+	s16 incr_src;
+	u16 pad2;
+	s16 incr_src_ma;
+};
+
+/* private structure */
+struct bcom_gen_bd_priv {
+	phys_addr_t	fifo;
+	int		initiator;
+	int		ipr;
+	int		maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code                                                        */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+		    int initiator, int ipr, int maxbufsize)
+{
+	struct bcom_task *tsk;
+	struct bcom_gen_bd_priv *priv;
+
+	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+			      sizeof(struct bcom_gen_bd_priv));
+	if (!tsk)
+		return NULL;
+
+	tsk->flags = BCOM_FLAGS_NONE;
+
+	priv = tsk->priv;
+	priv->fifo = fifo;
+	priv->initiator = initiator;
+	priv->ipr = ipr;
+	priv->maxbufsize = maxbufsize;
+
+	if (bcom_gen_bd_rx_reset(tsk)) {
+		bcom_task_free(tsk);
+		return NULL;
+	}
+
+	return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
+
+int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk)
+{
+	struct bcom_gen_bd_priv *priv = tsk->priv;
+	struct bcom_gen_bd_rx_var *var;
+	struct bcom_gen_bd_rx_inc *inc;
+
+	/* Shutdown the task */
+	bcom_disable_task(tsk->tasknum);
+
+	/* Reset the microcode */
+	var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
+	inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+	if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
+		return -1;
+
+	var->enable	= bcom_eng->regs_base +
+				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+	var->fifo	= (u32) priv->fifo;
+	var->bd_base	= tsk->bd_pa;
+	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+	var->bd_start	= tsk->bd_pa;
+	var->buffer_size = priv->maxbufsize;
+
+	inc->incr_bytes	= -(s16)sizeof(u32);
+	inc->incr_dst	= sizeof(u32);
+
+	/* Reset the BDs */
+	tsk->index = 0;
+	tsk->outdex = 0;
+
+	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+	/* Configure some stuff */
+	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+	out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+	bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
+
+void
+bcom_gen_bd_rx_release(struct bcom_task *tsk)
+{
+	/* Nothing special for the GenBD tasks */
+	bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
+
+
+
+struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+		    int initiator, int ipr)
+{
+	struct bcom_task *tsk;
+	struct bcom_gen_bd_priv *priv;
+
+	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+			      sizeof(struct bcom_gen_bd_priv));
+	if (!tsk)
+		return NULL;
+
+	tsk->flags = BCOM_FLAGS_NONE;
+
+	priv = tsk->priv;
+	priv->fifo = fifo;
+	priv->initiator = initiator;
+	priv->ipr = ipr;
+
+	if (bcom_gen_bd_tx_reset(tsk)) {
+		bcom_task_free(tsk);
+		return NULL;
+	}
+
+	return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
+
+int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk)
+{
+	struct bcom_gen_bd_priv *priv = tsk->priv;
+	struct bcom_gen_bd_tx_var *var;
+	struct bcom_gen_bd_tx_inc *inc;
+
+	/* Shutdown the task */
+	bcom_disable_task(tsk->tasknum);
+
+	/* Reset the microcode */
+	var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
+	inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+	if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
+		return -1;
+
+	var->enable	= bcom_eng->regs_base +
+				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+	var->fifo	= (u32) priv->fifo;
+	var->bd_base	= tsk->bd_pa;
+	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+	var->bd_start	= tsk->bd_pa;
+
+	inc->incr_bytes	= -(s16)sizeof(u32);
+	inc->incr_src	= sizeof(u32);
+	inc->incr_src_ma = sizeof(u8);
+
+	/* Reset the BDs */
+	tsk->index = 0;
+	tsk->outdex = 0;
+
+	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+	/* Configure some stuff */
+	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
+	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+	out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+	bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
+
+void
+bcom_gen_bd_tx_release(struct bcom_task *tsk)
+{
+	/* Nothing special for the GenBD tasks */
+	bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
+
+/* ---------------------------------------------------------------------
+ * PSC support code
+ */
+
+/**
+ * bcom_psc_parameters - Bestcomm initialization value table for PSC devices
+ *
+ * This structure is only used internally.  It is a lookup table for PSC
+ * specific parameters to bestcomm tasks.
+ */
+static struct bcom_psc_params {
+	int rx_initiator;
+	int rx_ipr;
+	int tx_initiator;
+	int tx_ipr;
+} bcom_psc_params[] = {
+	[0] = {
+		.rx_initiator = BCOM_INITIATOR_PSC1_RX,
+		.rx_ipr = BCOM_IPR_PSC1_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC1_TX,
+		.tx_ipr = BCOM_IPR_PSC1_TX,
+	},
+	[1] = {
+		.rx_initiator = BCOM_INITIATOR_PSC2_RX,
+		.rx_ipr = BCOM_IPR_PSC2_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC2_TX,
+		.tx_ipr = BCOM_IPR_PSC2_TX,
+	},
+	[2] = {
+		.rx_initiator = BCOM_INITIATOR_PSC3_RX,
+		.rx_ipr = BCOM_IPR_PSC3_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC3_TX,
+		.tx_ipr = BCOM_IPR_PSC3_TX,
+	},
+	[3] = {
+		.rx_initiator = BCOM_INITIATOR_PSC4_RX,
+		.rx_ipr = BCOM_IPR_PSC4_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC4_TX,
+		.tx_ipr = BCOM_IPR_PSC4_TX,
+	},
+	[4] = {
+		.rx_initiator = BCOM_INITIATOR_PSC5_RX,
+		.rx_ipr = BCOM_IPR_PSC5_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC5_TX,
+		.tx_ipr = BCOM_IPR_PSC5_TX,
+	},
+	[5] = {
+		.rx_initiator = BCOM_INITIATOR_PSC6_RX,
+		.rx_ipr = BCOM_IPR_PSC6_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC6_TX,
+		.tx_ipr = BCOM_IPR_PSC6_TX,
+	},
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+					   phys_addr_t fifo, int maxbufsize)
+{
+	if (psc_num >= MPC52xx_PSC_MAXNUM)
+		return NULL;
+
+	return bcom_gen_bd_rx_init(queue_len, fifo,
+				   bcom_psc_params[psc_num].rx_initiator,
+				   bcom_psc_params[psc_num].rx_ipr,
+				   maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+	if (psc_num >= MPC52xx_PSC_MAXNUM)
+		return NULL;
+
+	return bcom_gen_bd_tx_init(queue_len, fifo,
+				   bcom_psc_params[psc_num].tx_initiator,
+				   bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
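A serial-type driver never touches initiator/IPR numbers directly; it goes through the two PSC wrappers above. A hypothetical caller, with psc_fifo_phys standing in for the physical address of the PSC's FIFO data register, would do roughly:

	struct bcom_task *rx_tsk, *tx_tsk;

	rx_tsk = bcom_psc_gen_bd_rx_init(0, 16, psc_fifo_phys, 512);	/* PSC1 */
	tx_tsk = bcom_psc_gen_bd_tx_init(0, 16, psc_fifo_phys);
	if (!rx_tsk || !tx_tsk)
		goto err_release;

	bcom_enable(rx_tsk);
	bcom_enable(tx_tsk);

Note that psc_num is zero-based, matching the bcom_psc_params[] table: 0 selects PSC1.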
diff -urN linux-3.0.101/drivers/dma/bestcomm/Kconfig linux-3.0.101.xm510/drivers/dma/bestcomm/Kconfig
--- linux-3.0.101/drivers/dma/bestcomm/Kconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,36 @@
+#
+# Kconfig options for Bestcomm
+#
+
+config PPC_BESTCOMM
+	tristate "Bestcomm DMA engine support"
+	depends on PPC_MPC52xx
+	default n
+	select PPC_LIB_RHEAP
+	help
+	  BestComm is the name of the communication coprocessor found
+	  on the Freescale MPC5200 family of processors. Its usage is
+	  optional for some drivers (like ATA), but required for
+	  others (like FEC).
+
+	  If you want to use drivers that require DMA operations,
+	  answer Y or M. Otherwise say N.
+
+config PPC_BESTCOMM_ATA
+	tristate
+	depends on PPC_BESTCOMM
+	help
+	  This option enables the support for the ATA task.
+
+config PPC_BESTCOMM_FEC
+	tristate
+	depends on PPC_BESTCOMM
+	help
+	  This option enables the support for the FEC tasks.
+
+config PPC_BESTCOMM_GEN_BD
+	tristate
+	depends on PPC_BESTCOMM
+	help
+	  This option enables the support for the GenBD tasks.
+
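None of the three task symbols is user-visible; a client driver is expected to pull the one it needs in through a select statement. A representative client entry (a sketch modelled on the in-tree MPC5200 FEC ethernet driver's Kconfig, not copied from it):

config FEC_MPC52xx
	tristate "MPC5200 FEC driver"
	depends on PPC_MPC52xx && PPC_BESTCOMM
	select PPC_BESTCOMM_FEC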
diff -urN linux-3.0.101/drivers/dma/bestcomm/Makefile linux-3.0.101.xm510/drivers/dma/bestcomm/Makefile
--- linux-3.0.101/drivers/dma/bestcomm/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,14 @@
+#
+# Makefile for BestComm & co
+#
+
+bestcomm-core-objs	:= bestcomm.o sram.o
+bestcomm-ata-objs	:= ata.o bcom_ata_task.o
+bestcomm-fec-objs	:= fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
+bestcomm-gen-bd-objs	:= gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
+
+obj-$(CONFIG_PPC_BESTCOMM)		+= bestcomm-core.o
+obj-$(CONFIG_PPC_BESTCOMM_ATA)		+= bestcomm-ata.o
+obj-$(CONFIG_PPC_BESTCOMM_FEC)		+= bestcomm-fec.o
+obj-$(CONFIG_PPC_BESTCOMM_GEN_BD)	+= bestcomm-gen-bd.o
+
diff -urN linux-3.0.101/drivers/dma/bestcomm/sram.c linux-3.0.101.xm510/drivers/dma/bestcomm/sram.c
--- linux-3.0.101/drivers/dma/bestcomm/sram.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/bestcomm/sram.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,178 @@
+/*
+ * Simple memory allocator for on-board SRAM
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/mmu.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+
+
+/* Struct keeping our 'state' */
+struct bcom_sram *bcom_sram = NULL;
+EXPORT_SYMBOL_GPL(bcom_sram);	/* needed for inline functions */
+
+
+/* ======================================================================== */
+/* Public API                                                               */
+/* ======================================================================== */
+/* DO NOT USE in interrupts, if needed in irq handler, we should use the
+   _irqsave version of the spin_locks */
+
+int bcom_sram_init(struct device_node *sram_node, char *owner)
+{
+	int rv;
+	const u32 *regaddr_p;
+	u64 regaddr64, size64;
+	unsigned int psize;
+
+	/* Create our state struct */
+	if (bcom_sram) {
+		printk(KERN_ERR "%s: bcom_sram_init: "
+			"Already initialized !\n", owner);
+		return -EBUSY;
+	}
+
+	bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
+	if (!bcom_sram) {
+		printk(KERN_ERR "%s: bcom_sram_init: "
+			"Couldn't allocate internal state !\n", owner);
+		return -ENOMEM;
+	}
+
+	/* Get address and size of the sram */
+	regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
+	if (!regaddr_p) {
+		printk(KERN_ERR "%s: bcom_sram_init: "
+			"Invalid device node !\n", owner);
+		rv = -EINVAL;
+		goto error_free;
+	}
+
+	regaddr64 = of_translate_address(sram_node, regaddr_p);
+
+	bcom_sram->base_phys = (phys_addr_t) regaddr64;
+	bcom_sram->size = (unsigned int) size64;
+
+	/* Request region */
+	if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+		printk(KERN_ERR "%s: bcom_sram_init: "
+			"Couldn't request region !\n", owner);
+		rv = -EBUSY;
+		goto error_free;
+	}
+
+	/* Map SRAM */
+	/* sram is not really __iomem */
+	bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+
+	if (!bcom_sram->base_virt) {
+		printk(KERN_ERR "%s: bcom_sram_init: "
+			"Map error SRAM zone 0x%08lx (0x%08x)!\n",
+			owner, (long)bcom_sram->base_phys, bcom_sram->size );
+		rv = -ENOMEM;
+		goto error_release;
+	}
+
+	/* Create an rheap (defaults to 32 bits word alignment) */
+	bcom_sram->rh = rh_create(4);
+
+	/* Attach the free zones */
+#if 0
+	/* Currently disabled ... for future use only */
+	regaddr_p = of_get_property(sram_node, "available", &psize);
+#else
+	regaddr_p = NULL;
+	psize = 0;
+#endif
+
+	if (!regaddr_p || !psize) {
+		/* Attach the whole zone */
+		rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
+	} else {
+		/* Attach each zone independently */
+		while (psize >= 2 * sizeof(u32)) {
+			phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
+			rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
+			regaddr_p += 2;
+			psize -= 2 * sizeof(u32);
+		}
+	}
+
+	/* Init our spinlock */
+	spin_lock_init(&bcom_sram->lock);
+
+	return 0;
+
+error_release:
+	release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+error_free:
+	kfree(bcom_sram);
+	bcom_sram = NULL;
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_init);
+
+void bcom_sram_cleanup(void)
+{
+	/* Free resources */
+	if (bcom_sram) {
+		rh_destroy(bcom_sram->rh);
+		iounmap((void __iomem *)bcom_sram->base_virt);
+		release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+		kfree(bcom_sram);
+		bcom_sram = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
+
+void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
+{
+	unsigned long offset;
+
+	spin_lock(&bcom_sram->lock);
+	offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+	spin_unlock(&bcom_sram->lock);
+
+	if (IS_ERR_VALUE(offset))
+		return NULL;
+
+	*phys = bcom_sram->base_phys + offset;
+	return bcom_sram->base_virt + offset;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_alloc);
+
+void bcom_sram_free(void *ptr)
+{
+	unsigned long offset;
+
+	if (!ptr)
+		return;
+
+	offset = ptr - bcom_sram->base_virt;
+
+	spin_lock(&bcom_sram->lock);
+	rh_free(bcom_sram->rh, offset);
+	spin_unlock(&bcom_sram->lock);
+}
+EXPORT_SYMBOL_GPL(bcom_sram_free);
+
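Everything BestComm executes or shares with the host (task microcode, task variables, buffer descriptor rings) must live in this on-chip SRAM, which is why the task setup code in the files above ultimately draws from this allocator. A minimal alloc/free round trip, assuming bcom_sram_init() has already run against the SRAM device node:

	phys_addr_t pa;
	void *va;

	va = bcom_sram_alloc(64, 4, &pa);	/* 64 bytes, 32-bit aligned */
	if (!va)
		return -ENOMEM;
	/* 'pa' is what the BestComm engine sees, 'va' is the CPU mapping;
	 * bcom_sram_va2pa()/bcom_sram_pa2va() convert between the two. */
	memset(va, 0, 64);
	bcom_sram_free(va);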
diff -urN linux-3.0.101/drivers/dma/coh901318.h linux-3.0.101.xm510/drivers/dma/coh901318.h
--- linux-3.0.101/drivers/dma/coh901318.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/coh901318.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2007-2013 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * DMA driver for COH 901 318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#ifndef COH901318_H
+#define COH901318_H
+
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
+
+struct device;
+
+struct coh901318_pool {
+	spinlock_t lock;
+	struct dma_pool *dmapool;
+	struct device *dev;
+
+#ifdef CONFIG_DEBUG_FS
+	int debugfs_pool_counter;
+#endif
+};
+
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address to next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+	u32 control;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	dma_addr_t link_addr;
+
+	void *virt_link_addr;
+	dma_addr_t phy_this;
+};
+
+/**
+ * coh901318_pool_create() - Creates a dma pool for lli:s
+ * @pool: pool handle
+ * @dev: dma device
+ * @lli_nbr: number of lli:s in the pool
+ * @align: address alignment of lli:s
+ * returns 0 on success otherwise non-zero
+ */
+int coh901318_pool_create(struct coh901318_pool *pool,
+			  struct device *dev,
+			  size_t lli_nbr, size_t align);
+
+/**
+ * coh901318_pool_destroy() - Destroys the dma pool
+ * @pool: pool handle
+ * returns 0 on success otherwise non-zero
+ */
+int coh901318_pool_destroy(struct coh901318_pool *pool);
+
+/**
+ * coh901318_lli_alloc() - Allocates a linked list
+ *
+ * @pool: pool handle
+ * @len: length of the list
+ * return: non-NULL on success, otherwise NULL
+ */
+struct coh901318_lli *
+coh901318_lli_alloc(struct coh901318_pool *pool,
+		    unsigned int len);
+
+/**
+ * coh901318_lli_free() - Returns the linked list items to the pool
+ * @pool: pool handle
+ * @lli: reference to lli pointer to be freed
+ */
+void coh901318_lli_free(struct coh901318_pool *pool,
+			struct coh901318_lli **lli);
+
+/**
+ * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @src: src address
+ * @size: transfer size
+ * @dst: destination address
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
+			  struct coh901318_lli *lli,
+			  dma_addr_t src, unsigned int size,
+			  dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
+
+/**
+ * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @buf: transfer buffer
+ * @size: transfer size
+ * @dev_addr: address of peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_single(struct coh901318_pool *pool,
+			  struct coh901318_lli *lli,
+			  dma_addr_t buf, unsigned int size,
+			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
+			  enum dma_transfer_direction dir);
+
+/**
+ * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @sg: scatter gather list
+ * @nents: number of entries in sg
+ * @dev_addr: address of peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl: ctrl of middle lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * @ctrl_irq_mask: ctrl mask for CPU interrupt
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_sg(struct coh901318_pool *pool,
+		      struct coh901318_lli *lli,
+		      struct scatterlist *sg, unsigned int nents,
+		      dma_addr_t dev_addr, u32 ctrl_chained,
+		      u32 ctrl, u32 ctrl_last,
+		      enum dma_transfer_direction dir, u32 ctrl_irq_mask);
+
+#endif /* COH901318_H */
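The lli helpers are consumed by the coh901318 channel code roughly as follows (a sketch: dev, src_pa, dst_pa, len and the ctrl_* control words are illustrative, the latter being device-specific register values omitted here):

	struct coh901318_pool pool;
	struct coh901318_lli *lli;

	if (coh901318_pool_create(&pool, dev, 32, 4))
		return -ENOMEM;

	/* one lli per MAX_DMA_PACKET_SIZE chunk of the transfer */
	lli = coh901318_lli_alloc(&pool, DIV_ROUND_UP(len, MAX_DMA_PACKET_SIZE));
	if (lli)
		coh901318_lli_fill_memcpy(&pool, lli, src_pa, len, dst_pa,
					  ctrl_chained, ctrl_last);
	/* ... hand the chain to the hardware, wait for completion ... */
	coh901318_lli_free(&pool, &lli);
	coh901318_pool_destroy(&pool);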
diff -urN linux-3.0.101/drivers/dma/dmaengine.h linux-3.0.101.xm510/drivers/dma/dmaengine.h
--- linux-3.0.101/drivers/dma/dmaengine.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dmaengine.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,89 @@
+/*
+ * The contents of this file are private to DMA engine drivers, and are not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+	chan->cookie = DMA_MIN_COOKIE;
+	chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan = tx->chan;
+	dma_cookie_t cookie;
+
+	cookie = chan->cookie + 1;
+	if (cookie < DMA_MIN_COOKIE)
+		cookie = DMA_MIN_COOKIE;
+	tx->cookie = chan->cookie = cookie;
+
+	return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channel's completed
+ * cookie marker. Zero the descriptor's cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+	tx->chan->completed_cookie = tx->cookie;
+	tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL.  No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	dma_cookie_t used, complete;
+
+	used = chan->cookie;
+	complete = chan->completed_cookie;
+	barrier();
+	if (state) {
+		state->last = complete;
+		state->used = used;
+		state->residue = 0;
+	}
+	return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+	if (state)
+		state->residue = residue;
+}
+
+#endif
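These helpers encode the usual dmaengine cookie protocol: assign in tx_submit, complete from the IRQ/tasklet path once the hardware is done, query in device_tx_status. A driver wires them up roughly like this (sketch; the mydrv_* names and the residue lookup are illustrative):

	static dma_cookie_t mydrv_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		/* caller serializes submissions with the channel lock */
		return dma_cookie_assign(tx);
	}

	static enum dma_status mydrv_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *state)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, state);

		if (ret != DMA_SUCCESS)
			dma_set_residue(state, mydrv_residue(chan, cookie));
		return ret;
	}

dma_cookie_complete() is then called on the finished descriptor from the completion handler, under the same lock as dma_cookie_assign().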
diff -urN linux-3.0.101/drivers/dma/dma-jz4740.c linux-3.0.101.xm510/drivers/dma/dma-jz4740.c
--- linux-3.0.101/drivers/dma/dma-jz4740.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dma-jz4740.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,617 @@
+/*
+ *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ *  JZ4740 DMAC support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+
+#include <asm/mach-jz4740/dma.h>
+
+#include "virt-dma.h"
+
+#define JZ_DMA_NR_CHANS 6
+
+#define JZ_REG_DMA_SRC_ADDR(x)		(0x00 + (x) * 0x20)
+#define JZ_REG_DMA_DST_ADDR(x)		(0x04 + (x) * 0x20)
+#define JZ_REG_DMA_TRANSFER_COUNT(x)	(0x08 + (x) * 0x20)
+#define JZ_REG_DMA_REQ_TYPE(x)		(0x0C + (x) * 0x20)
+#define JZ_REG_DMA_STATUS_CTRL(x)	(0x10 + (x) * 0x20)
+#define JZ_REG_DMA_CMD(x)		(0x14 + (x) * 0x20)
+#define JZ_REG_DMA_DESC_ADDR(x)		(0x18 + (x) * 0x20)
+
+#define JZ_REG_DMA_CTRL			0x300
+#define JZ_REG_DMA_IRQ			0x304
+#define JZ_REG_DMA_DOORBELL		0x308
+#define JZ_REG_DMA_DOORBELL_SET		0x30C
+
+#define JZ_DMA_STATUS_CTRL_NO_DESC		BIT(31)
+#define JZ_DMA_STATUS_CTRL_DESC_INV		BIT(6)
+#define JZ_DMA_STATUS_CTRL_ADDR_ERR		BIT(4)
+#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE	BIT(3)
+#define JZ_DMA_STATUS_CTRL_HALT			BIT(2)
+#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE	BIT(1)
+#define JZ_DMA_STATUS_CTRL_ENABLE		BIT(0)
+
+#define JZ_DMA_CMD_SRC_INC			BIT(23)
+#define JZ_DMA_CMD_DST_INC			BIT(22)
+#define JZ_DMA_CMD_RDIL_MASK			(0xf << 16)
+#define JZ_DMA_CMD_SRC_WIDTH_MASK		(0x3 << 14)
+#define JZ_DMA_CMD_DST_WIDTH_MASK		(0x3 << 12)
+#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK		(0x7 << 8)
+#define JZ_DMA_CMD_BLOCK_MODE			BIT(7)
+#define JZ_DMA_CMD_DESC_VALID			BIT(4)
+#define JZ_DMA_CMD_DESC_VALID_MODE		BIT(3)
+#define JZ_DMA_CMD_VALID_IRQ_ENABLE		BIT(2)
+#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE		BIT(1)
+#define JZ_DMA_CMD_LINK_ENABLE			BIT(0)
+
+#define JZ_DMA_CMD_FLAGS_OFFSET 22
+#define JZ_DMA_CMD_RDIL_OFFSET 16
+#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
+#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
+#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
+#define JZ_DMA_CMD_MODE_OFFSET 7
+
+#define JZ_DMA_CTRL_PRIORITY_MASK	(0x3 << 8)
+#define JZ_DMA_CTRL_HALT		BIT(3)
+#define JZ_DMA_CTRL_ADDRESS_ERROR	BIT(2)
+#define JZ_DMA_CTRL_ENABLE		BIT(0)
+
+enum jz4740_dma_width {
+	JZ4740_DMA_WIDTH_32BIT	= 0,
+	JZ4740_DMA_WIDTH_8BIT	= 1,
+	JZ4740_DMA_WIDTH_16BIT	= 2,
+};
+
+enum jz4740_dma_transfer_size {
+	JZ4740_DMA_TRANSFER_SIZE_4BYTE	= 0,
+	JZ4740_DMA_TRANSFER_SIZE_1BYTE	= 1,
+	JZ4740_DMA_TRANSFER_SIZE_2BYTE	= 2,
+	JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
+	JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
+};
+
+enum jz4740_dma_flags {
+	JZ4740_DMA_SRC_AUTOINC = 0x2,
+	JZ4740_DMA_DST_AUTOINC = 0x1,
+};
+
+enum jz4740_dma_mode {
+	JZ4740_DMA_MODE_SINGLE = 0,
+	JZ4740_DMA_MODE_BLOCK = 1,
+};
+
+struct jz4740_dma_sg {
+	dma_addr_t addr;
+	unsigned int len;
+};
+
+struct jz4740_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct jz4740_dma_sg sg[];
+};
+
+struct jz4740_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct jz4740_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct jz4740_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+	struct clk *clk;
+
+	struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
+};
+
+static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
+	struct jz4740_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
+		ddev);
+}
+
+static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
+}
+
+static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct jz4740_dma_desc, vdesc);
+}
+
+static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
+	unsigned int reg)
+{
+	return readl(dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
+	unsigned reg, uint32_t val)
+{
+	writel(val, dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = jz4740_dma_read(dmadev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	jz4740_dma_write(dmadev, reg, tmp);
+}
+
+static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct jz4740_dma_desc) +
+		sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return JZ4740_DMA_WIDTH_8BIT;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return JZ4740_DMA_WIDTH_16BIT;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return JZ4740_DMA_WIDTH_32BIT;
+	default:
+		return JZ4740_DMA_WIDTH_32BIT;
+	}
+}
+
+static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 1)
+		return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
+	else if (maxburst <= 3)
+		return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
+	else if (maxburst <= 15)
+		return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 31)
+		return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
+
+	return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int jz4740_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+	enum jz4740_dma_width src_width;
+	enum jz4740_dma_width dst_width;
+	enum jz4740_dma_transfer_size transfer_size;
+	enum jz4740_dma_flags flags;
+	uint32_t cmd;
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		flags = JZ4740_DMA_SRC_AUTOINC;
+		transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+	case DMA_DEV_TO_MEM:
+		flags = JZ4740_DMA_DST_AUTOINC;
+		transfer_size = jz4740_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	src_width = jz4740_dma_width(config->src_addr_width);
+	dst_width = jz4740_dma_width(config->dst_addr_width);
+
+	switch (transfer_size) {
+	case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
+		chan->transfer_shift = 1;
+		break;
+	case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
+		chan->transfer_shift = 2;
+		break;
+	case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
+		chan->transfer_shift = 4;
+		break;
+	case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
+		chan->transfer_shift = 5;
+		break;
+	default:
+		chan->transfer_shift = 0;
+		break;
+	}
+
+	cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
+	cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
+	cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
+	cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
+	cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
+	cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
+
+	jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
+	jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
+	jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
+		config->slave_id);
+
+	return 0;
+}
+
+static int jz4740_dma_terminate_all(struct dma_chan *c)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+			JZ_DMA_STATUS_CTRL_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return jz4740_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return jz4740_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
+{
+	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct jz4740_dma_sg *sg;
+
+	jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+			JZ_DMA_STATUS_CTRL_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_jz4740_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+	jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
+	jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
+	jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
+			sg->len >> chan->transfer_shift);
+
+	chan->next_sg++;
+
+	jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
+			JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
+			JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
+			JZ_DMA_STATUS_CTRL_ENABLE);
+
+	jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
+			JZ_DMA_CTRL_ENABLE,
+			JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
+
+	return 0;
+}
+
+static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+	if (chan->desc) {
+		if (chan->desc && chan->desc->cyclic) {
+			vchan_cyclic_callback(&chan->desc->vdesc);
+		} else {
+			if (chan->next_sg == chan->desc->num_sgs) {
+				vchan_cookie_complete(&chan->desc->vdesc);
+				chan->desc = NULL;
+			}
+		}
+	}
+	jz4740_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
+static irqreturn_t jz4740_dma_irq(int irq, void *devid)
+{
+	struct jz4740_dma_dev *dmadev = devid;
+	uint32_t irq_status;
+	unsigned int i;
+
+	irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
+
+	for (i = 0; i < 6; ++i) {
+		if (irq_status & (1 << i)) {
+			jz4740_dma_write_mask(dmadev,
+				JZ_REG_DMA_STATUS_CTRL(i), 0,
+				JZ_DMA_STATUS_CTRL_ENABLE |
+				JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
+
+			jz4740_dma_chan_irq(&dmadev->chan[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void jz4740_dma_issue_pending(struct dma_chan *c)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		jz4740_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	struct jz4740_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = jz4740_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	struct jz4740_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = jz4740_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
+	struct jz4740_dma_desc *desc, unsigned int next_sg)
+{
+	struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = jz4740_dma_read(dmadev,
+			JZ_REG_DMA_TRANSFER_COUNT(chan->id));
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
+static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_SUCCESS || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = jz4740_dma_desc_residue(chan, chan->desc,
+				chan->next_sg);
+	} else if (vdesc) {
+		state->residue = jz4740_dma_desc_residue(chan,
+				to_jz4740_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void jz4740_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
+}
+
+static int jz4740_dma_probe(struct platform_device *pdev)
+{
+	struct jz4740_dmaengine_chan *chan;
+	struct jz4740_dma_dev *dmadev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	int ret;
+	int irq;
+
+	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+	if (!dmadev)
+		return -ENOMEM;
+
+	dd = &dmadev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmadev->base))
+		return PTR_ERR(dmadev->base);
+
+	dmadev->clk = clk_get(&pdev->dev, "dma");
+	if (IS_ERR(dmadev->clk))
+		return PTR_ERR(dmadev->clk);
+
+	clk_prepare_enable(dmadev->clk);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
+	dd->device_tx_status = jz4740_dma_tx_status;
+	dd->device_issue_pending = jz4740_dma_issue_pending;
+	dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
+	dd->device_control = jz4740_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = JZ_DMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dmadev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = jz4740_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
+	if (ret)
+		goto err_unregister;
+
+	platform_set_drvdata(pdev, dmadev);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int jz4740_dma_remove(struct platform_device *pdev)
+{
+	struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dmadev);
+	dma_async_device_unregister(&dmadev->ddev);
+	clk_disable_unprepare(dmadev->clk);
+
+	return 0;
+}
+
+static struct platform_driver jz4740_dma_driver = {
+	.probe = jz4740_dma_probe,
+	.remove = jz4740_dma_remove,
+	.driver = {
+		.name = "jz4740-dma",
+		.owner = THIS_MODULE,
+	},
+};
+module_platform_driver(jz4740_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("JZ4740 DMA driver");
+MODULE_LICENSE("GPL v2");
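From the client side this controller is driven through the generic dmaengine API: the request line is passed via dma_slave_config.slave_id (it ends up in JZ_REG_DMA_REQ_TYPE above) and cyclic transfers map onto jz4740_dma_prep_dma_cyclic(). A sketch for a mem-to-device cyclic (audio-style) transfer; chan, fifo_phys, buf_pa, buf_len, period_len and the request-type constant are placeholders:

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 16,
		.slave_id	= JZ4740_DMA_TYPE_AIC_TRANSMIT,	/* request line */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);
	desc = chan->device->device_prep_dma_cyclic(chan, buf_pa, buf_len,
			period_len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
	if (!desc)
		return -ENOMEM;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);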
diff -urN linux-3.0.101/drivers/dma/dw/core.c linux-3.0.101.xm510/drivers/dma/dw/core.c
|
||
--- linux-3.0.101/drivers/dma/dw/core.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/dw/core.c 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,1730 @@
|
||
+/*
|
||
+ * Core driver for the Synopsys DesignWare DMA Controller
|
||
+ *
|
||
+ * Copyright (C) 2007-2008 Atmel Corporation
|
||
+ * Copyright (C) 2010-2011 ST Microelectronics
|
||
+ * Copyright (C) 2013 Intel Corporation
|
||
+ *
|
||
+ * This program is free software; you can redistribute it and/or modify
|
||
+ * it under the terms of the GNU General Public License version 2 as
|
||
+ * published by the Free Software Foundation.
|
||
+ */
|
||
+
|
||
+#include <linux/bitops.h>
|
||
+#include <linux/clk.h>
|
||
+#include <linux/delay.h>
|
||
+#include <linux/dmaengine.h>
|
||
+#include <linux/dma-mapping.h>
|
||
+#include <linux/dmapool.h>
|
||
+#include <linux/err.h>
|
||
+#include <linux/init.h>
|
||
+#include <linux/interrupt.h>
|
||
+#include <linux/io.h>
|
||
+#include <linux/mm.h>
|
||
+#include <linux/module.h>
|
||
+#include <linux/slab.h>
|
||
+
|
||
+#include "../dmaengine.h"
|
||
+#include "internal.h"
|
||
+
|
||
+/*
|
||
+ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
|
||
+ * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
|
||
+ * of which use ARM any more). See the "Databook" from Synopsys for
|
||
+ * information beyond what licensees probably provide.
|
||
+ *
|
||
+ * The driver has currently been tested only with the Atmel AT32AP7000,
|
||
+ * which does not support descriptor writeback.
|
||
+ */
|
||
+
|
||
+static inline void dwc_set_masters(struct dw_dma_chan *dwc)
|
||
+{
|
||
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||
+ struct dw_dma_slave *dws = dwc->chan.private;
|
||
+ unsigned char mmax = dw->nr_masters - 1;
|
||
+
|
||
+ if (dwc->request_line == ~0) {
|
||
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
|
||
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
|
||
+ }
|
||
+}
|
||
+
|
||
+#define DWC_DEFAULT_CTLLO(_chan) ({ \
|
||
+ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
|
||
+ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
|
||
+ bool _is_slave = is_slave_direction(_dwc->direction); \
|
||
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
|
||
+ DW_DMA_MSIZE_16; \
|
||
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
|
||
+ DW_DMA_MSIZE_16; \
|
||
+ \
|
||
+ (DWC_CTLL_DST_MSIZE(_dmsize) \
|
||
+ | DWC_CTLL_SRC_MSIZE(_smsize) \
|
||
+ | DWC_CTLL_LLP_D_EN \
|
||
+ | DWC_CTLL_LLP_S_EN \
|
||
+ | DWC_CTLL_DMS(_dwc->dst_master) \
|
||
+ | DWC_CTLL_SMS(_dwc->src_master)); \
|
||
+ })
|
||
+
+/*
+ * Number of descriptors to allocate for each channel. This should be
+ * made configurable somehow; preferably, the clients (at least the
+ * ones using slave transfers) should be able to give us a hint.
+ */
+#define NR_DESCS_PER_CHANNEL	64
+
+/*----------------------------------------------------------------------*/
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+	return chan->dev->device.parent;
+}
+
+static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+{
+	return to_dw_desc(dwc->active_list.next);
+}
+
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+	struct dw_desc *desc, *_desc;
+	struct dw_desc *ret = NULL;
+	unsigned int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+		i++;
+		if (async_tx_test_ack(&desc->txd)) {
+			list_del(&desc->desc_node);
+			ret = desc;
+			break;
+		}
+		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
+
+	return ret;
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+	unsigned long flags;
+
+	if (desc) {
+		struct dw_desc *child;
+
+		spin_lock_irqsave(&dwc->lock, flags);
+		list_for_each_entry(child, &desc->tx_list, desc_node)
+			dev_vdbg(chan2dev(&dwc->chan),
+					"moving child desc %p to freelist\n",
+					child);
+		list_splice_init(&desc->tx_list, &dwc->free_list);
+		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
+		list_add(&desc->desc_node, &dwc->free_list);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	}
+}
+
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_dma_slave *dws = dwc->chan.private;
+	u32 cfghi = DWC_CFGH_FIFO_MODE;
+	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+	if (dwc->initialized == true)
+		return;
+
+	if (dws) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+		cfghi = dws->cfg_hi;
+		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+	} else {
+		if (dwc->direction == DMA_MEM_TO_DEV)
+			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+		else if (dwc->direction == DMA_DEV_TO_MEM)
+			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+	}
+
+	channel_writel(dwc, CFG_LO, cfglo);
+	channel_writel(dwc, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+	dwc->initialized = true;
+}
+
+/*----------------------------------------------------------------------*/
+
+static inline unsigned int dwc_fast_fls(unsigned long long v)
+{
+	/*
+	 * We can be a lot more clever here, but this should take care
+	 * of the most common optimization.
+	 */
+	if (!(v & 7))
+		return 3;
+	else if (!(v & 3))
+		return 2;
+	else if (!(v & 1))
+		return 1;
+	return 0;
+}
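+/*
+ * Despite the "fls" name, this returns the largest transfer-width
+ * exponent (0..3, i.e. 1/2/4/8 byte accesses) that still divides v.
+ * Passing src | dest | len yields the widest access that is legal for
+ * all three at once, e.g. 0x1004 | 0x2008 | 0x40 -> 2 (32-bit).
+ */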
+
+static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+{
+	dev_err(chan2dev(&dwc->chan),
+		" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+		channel_readl(dwc, SAR),
+		channel_readl(dwc, DAR),
+		channel_readl(dwc, LLP),
+		channel_readl(dwc, CTL_HI),
+		channel_readl(dwc, CTL_LO));
+}
+
+static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+}
+
+/*----------------------------------------------------------------------*/
+
+/* Perform single block transfer */
+static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+				       struct dw_desc *desc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	u32 ctllo;
+
+	/* Software emulation of LLP mode relies on interrupts to continue
+	 * multi block transfer. */
+	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+
+	channel_writel(dwc, SAR, desc->lli.sar);
+	channel_writel(dwc, DAR, desc->lli.dar);
+	channel_writel(dwc, CTL_LO, ctllo);
+	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	/* Move pointer to next descriptor */
+	dwc->tx_node_active = dwc->tx_node_active->next;
+}
+
+/* Called with dwc->lock held and bh disabled */
+static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long was_soft_llp;
+
+	/* ASSERT: channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dwc_dump_chan_regs(dwc);
+
+		/* The tasklet will hopefully advance the queue... */
+		return;
+	}
+
+	if (dwc->nollp) {
+		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+						&dwc->flags);
+		if (was_soft_llp) {
+			dev_err(chan2dev(&dwc->chan),
+				"BUG: Attempted to start new LLP transfer "
+				"inside ongoing one\n");
+			return;
+		}
+
+		dwc_initialize(dwc);
+
+		dwc->residue = first->total_len;
+		dwc->tx_node_active = &first->tx_list;
+
+		/* Submit first block */
+		dwc_do_single_block(dwc, first);
+
+		return;
+	}
+
+	dwc_initialize(dwc);
+
+	channel_writel(dwc, LLP, first->txd.phys);
+	channel_writel(dwc, CTL_LO,
+			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+	channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+		bool callback_required)
+{
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
+	struct dma_async_tx_descriptor *txd = &desc->txd;
+	struct dw_desc *child;
+	unsigned long flags;
+
+	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dma_cookie_complete(txd);
+	if (callback_required) {
+		callback = txd->callback;
+		param = txd->callback_param;
+	}
+
+	/* async_tx_ack */
+	list_for_each_entry(child, &desc->tx_list, desc_node)
+		async_tx_ack(&child->txd);
+	async_tx_ack(&desc->txd);
+
+	list_splice_init(&desc->tx_list, &dwc->free_list);
+	list_move(&desc->desc_node, &dwc->free_list);
+
+	if (!is_slave_direction(dwc->direction)) {
+		struct device *parent = chan2parent(&dwc->chan);
+		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+				dma_unmap_single(parent, desc->lli.dar,
+					desc->total_len, DMA_FROM_DEVICE);
+			else
+				dma_unmap_page(parent, desc->lli.dar,
+					desc->total_len, DMA_FROM_DEVICE);
+		}
+		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+				dma_unmap_single(parent, desc->lli.sar,
+					desc->total_len, DMA_TO_DEVICE);
+			else
+				dma_unmap_page(parent, desc->lli.sar,
+					desc->total_len, DMA_TO_DEVICE);
+		}
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	if (callback)
+		callback(param);
+}
+
+static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	struct dw_desc *desc, *_desc;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: XFER bit set, but channel not idle!\n");
+
+		/* Try to continue after resetting the channel... */
+		dwc_chan_disable(dw, dwc);
+	}
+
+	/*
+	 * Submit queued descriptors ASAP, i.e. before we go through
+	 * the completed ones.
+	 */
+	list_splice_init(&dwc->active_list, &list);
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		dwc_descriptor_complete(dwc, desc, true);
+}
+
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+	u32 ctlhi = channel_readl(dwc, CTL_HI);
+	u32 ctllo = channel_readl(dwc, CTL_LO);
+
+	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
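+/*
+ * CTL_HI.BLOCK_TS counts single transfers, not bytes, so the byte
+ * count is BLOCK_TS << SRC_TR_WIDTH; bits 6:4 of CTL_LO hold that
+ * width, e.g. BLOCK_TS = 16 with 32-bit source accesses -> 64 bytes.
+ */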
+
+static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	dma_addr_t llp;
+	struct dw_desc *desc, *_desc;
+	struct dw_desc *child;
+	u32 status_xfer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	llp = channel_readl(dwc, LLP);
+	status_xfer = dma_readl(dw, RAW.XFER);
+
+	if (status_xfer & dwc->mask) {
+		/* Everything we've submitted is done */
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+			struct list_head *head, *active = dwc->tx_node_active;
+
+			/*
+			 * We are inside first active descriptor.
+			 * Otherwise something is really wrong.
+			 */
+			desc = dwc_first_active(dwc);
+
+			head = &desc->tx_list;
+			if (active != head) {
+				/* Update desc to reflect last sent one */
+				if (active != head->next)
+					desc = to_dw_desc(active->prev);
+
+				dwc->residue -= desc->len;
+
+				child = to_dw_desc(active);
+
+				/* Submit next block */
+				dwc_do_single_block(dwc, child);
+
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+
+			/* We are done here */
+			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+		}
+
+		dwc->residue = 0;
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		dwc_complete_all(dw, dwc);
+		return;
+	}
+
+	if (list_empty(&dwc->active_list)) {
+		dwc->residue = 0;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+			(unsigned long long)llp);
+
+	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* Initial residue value */
+		dwc->residue = desc->total_len;
+
+		/* Check first descriptors addr */
+		if (desc->txd.phys == llp) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		/* Check first descriptors llp */
+		if (desc->lli.llp == llp) {
+			/* This one is currently in progress */
+			dwc->residue -= dwc_get_sent(dwc);
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		dwc->residue -= desc->len;
+		list_for_each_entry(child, &desc->tx_list, desc_node) {
+			if (child->lli.llp == llp) {
+				/* Currently in progress */
+				dwc->residue -= dwc_get_sent(dwc);
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+			dwc->residue -= child->len;
+		}
+
+		/*
+		 * No descriptors so far seem to be in progress, i.e.
+		 * this one must be done.
+		 */
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc_descriptor_complete(dwc, desc, true);
+		spin_lock_irqsave(&dwc->lock, flags);
+	}
+
+	dev_err(chan2dev(&dwc->chan),
+		"BUG: All descriptors done, but channel not idle!\n");
+
+	/* Try to continue after resetting the channel... */
+	dwc_chan_disable(dw, dwc);
+
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+{
+	dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+}
+
+static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	struct dw_desc *bad_desc;
+	struct dw_desc *child;
+	unsigned long flags;
+
+	dwc_scan_descriptors(dw, dwc);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * borked. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to carry on.
+	 */
+	bad_desc = dwc_first_active(dwc);
+	list_del_init(&bad_desc->desc_node);
+	list_move(dwc->queue.next, dwc->active_list.prev);
+
+	/* Clear the error flag and try to restart the controller */
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	if (!list_empty(&dwc->active_list))
+		dwc_dostart(dwc, dwc_first_active(dwc));
+
+	/*
+	 * WARN may seem harsh, but since this only happens
+	 * when someone submits a bad physical address in a
+	 * descriptor, we should consider ourselves lucky that the
+	 * controller flagged an error instead of scribbling over
+	 * random memory locations.
+	 */
+	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+			" cookie: %d\n", bad_desc->txd.cookie);
+	dwc_dump_lli(dwc, &bad_desc->lli);
+	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+		dwc_dump_lli(dwc, &child->lli);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	/* Pretend the descriptor completed successfully */
+	dwc_descriptor_complete(dwc, bad_desc, true);
+}
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* Called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+		u32 status_err, u32 status_xfer)
+{
+	unsigned long flags;
+
+	if (dwc->mask) {
+		void (*callback)(void *param);
+		void *callback_param;
+
+		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+				channel_readl(dwc, LLP));
+
+		callback = dwc->cdesc->period_callback;
+		callback_param = dwc->cdesc->period_callback_param;
+
+		if (callback)
+			callback(callback_param);
+	}
+
+	/*
+	 * Error and transfer complete are highly unlikely, and will most
+	 * likely be due to a configuration error by the user.
+	 */
+	if (unlikely(status_err & dwc->mask) ||
+			unlikely(status_xfer & dwc->mask)) {
+		int i;
+
+		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+				"interrupt, stopping DMA transfer\n",
+				status_xfer ? "xfer" : "error");
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		dwc_dump_chan_regs(dwc);
+
+		dwc_chan_disable(dw, dwc);
+
+		/* Make sure DMA does not restart by loading a new list */
+		channel_writel(dwc, LLP, 0);
+		channel_writel(dwc, CTL_LO, 0);
+		channel_writel(dwc, CTL_HI, 0);
+
+		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		for (i = 0; i < dwc->cdesc->periods; i++)
+			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void dw_dma_tasklet(unsigned long data)
+{
+	struct dw_dma *dw = (struct dw_dma *)data;
+	struct dw_dma_chan *dwc;
+	u32 status_xfer;
+	u32 status_err;
+	int i;
+
+	status_xfer = dma_readl(dw, RAW.XFER);
+	status_err = dma_readl(dw, RAW.ERROR);
+
+	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
+
+	for (i = 0; i < dw->dma.chancnt; i++) {
+		dwc = &dw->chan[i];
+		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+		else if (status_err & (1 << i))
+			dwc_handle_error(dw, dwc);
+		else if (status_xfer & (1 << i))
+			dwc_scan_descriptors(dw, dwc);
+	}
+
+	/*
+	 * Re-enable interrupts.
+	 */
+	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+}
+
+static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+{
+	struct dw_dma *dw = dev_id;
+	u32 status;
+
+	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
+			dma_readl(dw, STATUS_INT));
+
+	/*
+	 * Just disable the interrupts. We'll turn them back on in the
+	 * softirq handler.
+	 */
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	status = dma_readl(dw, STATUS_INT);
+	if (status) {
+		dev_err(dw->dma.dev,
+			"BUG: Unexpected interrupts pending: 0x%x\n",
+			status);
+
+		/* Try to recover */
+		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+	}
+
+	tasklet_schedule(&dw->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct dw_desc		*desc = txd_to_dw_desc(tx);
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
+	dma_cookie_t		cookie;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	cookie = dma_cookie_assign(tx);
+
+	/*
+	 * REVISIT: We should attempt to chain as many descriptors as
+	 * possible, perhaps even appending to those already submitted
+	 * for DMA. But this is hard to do in a race-free manner.
+	 */
+	if (list_empty(&dwc->active_list)) {
+		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+				desc->txd.cookie);
+		list_add_tail(&desc->desc_node, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	} else {
+		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+				desc->txd.cookie);
+
+		list_add_tail(&desc->desc_node, &dwc->queue);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc;
+	struct dw_desc		*first;
+	struct dw_desc		*prev;
+	size_t			xfer_count;
+	size_t			offset;
+	unsigned int		src_width;
+	unsigned int		dst_width;
+	unsigned int		data_width;
+	u32			ctllo;
+
+	dev_vdbg(chan2dev(chan),
+			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+			(unsigned long long)dest, (unsigned long long)src,
+			len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	dwc->direction = DMA_MEM_TO_MEM;
+
+	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+			   dw->data_width[dwc->dst_master]);
+
+	src_width = dst_width = min_t(unsigned int, data_width,
+				      dwc_fast_fls(src | dest | len));
+
+	ctllo = DWC_DEFAULT_CTLLO(chan)
+			| DWC_CTLL_DST_WIDTH(dst_width)
+			| DWC_CTLL_SRC_WIDTH(src_width)
+			| DWC_CTLL_DST_INC
+			| DWC_CTLL_SRC_INC
+			| DWC_CTLL_FC_M2M;
+	prev = first = NULL;
+
+	for (offset = 0; offset < len; offset += xfer_count << src_width) {
+		xfer_count = min_t(size_t, (len - offset) >> src_width,
+					   dwc->block_size);
+
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto err_desc_get;
+
+		desc->lli.sar = src + offset;
+		desc->lli.dar = dest + offset;
+		desc->lli.ctllo = ctllo;
+		desc->lli.ctlhi = xfer_count;
+		desc->len = xfer_count << src_width;
+
+		if (!first) {
+			first = desc;
+		} else {
+			prev->lli.llp = desc->txd.phys;
+			list_add_tail(&desc->desc_node,
+					&first->tx_list);
+		}
+		prev = desc;
+	}
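+	/*
+	 * Each descriptor built above covers at most block_size
+	 * transfers of 2^src_width bytes, so a large memcpy becomes a
+	 * chain of blocks linked through lli.llp (or walked block by
+	 * block in software on channels without LLP support).
+	 */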
+
+	if (flags & DMA_PREP_INTERRUPT)
+		/* Trigger interrupt after last block */
+		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+	prev->lli.llp = 0;
+	first->txd.flags = flags;
+	first->total_len = len;
+
+	return &first->txd;
+
+err_desc_get:
+	dwc_desc_put(dwc, first);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
+	struct dw_desc		*prev;
+	struct dw_desc		*first;
+	u32			ctllo;
+	dma_addr_t		reg;
+	unsigned int		reg_width;
+	unsigned int		mem_width;
+	unsigned int		data_width;
+	unsigned int		i;
+	struct scatterlist	*sg;
+	size_t			total_len = 0;
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	if (unlikely(!is_slave_direction(direction) || !sg_len))
+		return NULL;
+
+	dwc->direction = direction;
+
+	prev = first = NULL;
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		reg_width = __fls(sconfig->dst_addr_width);
+		reg = sconfig->dst_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_DST_FIX
+				| DWC_CTLL_SRC_INC);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
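+		/*
+		 * device_fc selects the flow controller: the peripheral
+		 * itself (DW_DMA_FC_P_M2P) paces the transfer, otherwise
+		 * the DMAC (DW_DMA_FC_D_M2P) acts as flow controller.
+		 */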
+
+		data_width = dw->data_width[dwc->src_master];
+
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct dw_desc	*desc;
+			u32		len, dlen, mem;
+
+			mem = sg_dma_address(sg);
+			len = sg_dma_len(sg);
+
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
+
+slave_sg_todev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+					"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
+			desc->lli.sar = mem;
+			desc->lli.dar = reg;
+			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+			if ((len >> mem_width) > dwc->block_size) {
+				dlen = dwc->block_size << mem_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+
+			desc->lli.ctlhi = dlen >> mem_width;
+			desc->len = dlen;
+
+			if (!first) {
+				first = desc;
+			} else {
+				prev->lli.llp = desc->txd.phys;
+				list_add_tail(&desc->desc_node,
+						&first->tx_list);
+			}
+			prev = desc;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_todev_fill_desc;
+		}
+		break;
+	case DMA_DEV_TO_MEM:
+		reg_width = __fls(sconfig->src_addr_width);
+		reg = sconfig->src_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_INC
+				| DWC_CTLL_SRC_FIX);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
+		data_width = dw->data_width[dwc->dst_master];
+
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct dw_desc	*desc;
+			u32		len, dlen, mem;
+
+			mem = sg_dma_address(sg);
+			len = sg_dma_len(sg);
+
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
+
+slave_sg_fromdev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+						"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
+			desc->lli.sar = reg;
+			desc->lli.dar = mem;
+			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+			if ((len >> reg_width) > dwc->block_size) {
+				dlen = dwc->block_size << reg_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+			desc->lli.ctlhi = dlen >> reg_width;
+			desc->len = dlen;
+
+			if (!first) {
+				first = desc;
+			} else {
+				prev->lli.llp = desc->txd.phys;
+				list_add_tail(&desc->desc_node,
+						&first->tx_list);
+			}
+			prev = desc;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_fromdev_fill_desc;
+		}
+		break;
+	default:
+		return NULL;
+	}
+
+	if (flags & DMA_PREP_INTERRUPT)
+		/* Trigger interrupt after last block */
+		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+	prev->lli.llp = 0;
+	first->total_len = total_len;
+
+	return &first->txd;
+
+err_desc_get:
+	dwc_desc_put(dwc, first);
+	return NULL;
+}
+
+/*
+ * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by controller.
+ *
+ * This can be done by finding the most significant bit set: fls(n) - 2.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+	if (*maxburst > 1)
+		*maxburst = fls(*maxburst) - 2;
+	else
+		*maxburst = 0;
+}
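+/*
+ * E.g. a requested maxburst of 16 becomes fls(16) - 2 = 3 and 4
+ * becomes fls(4) - 2 = 1, matching the table above; 1 maps to the
+ * single-transfer encoding 0.
+ */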
+
+static int
+set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(sconfig->direction))
+		return -EINVAL;
+
+	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+	dwc->direction = sconfig->direction;
+
+	/* Take the request line from slave_id member */
+	if (dwc->request_line == ~0)
+		dwc->request_line = sconfig->slave_id;
+
+	convert_burst(&dwc->dma_sconfig.src_maxburst);
+	convert_burst(&dwc->dma_sconfig.dst_maxburst);
+
+	return 0;
+}
+
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+	unsigned int count = 20;	/* timeout iterations */
+
+	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+		udelay(2);
+
+	dwc->paused = true;
+}
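+/*
+ * The drain loop above bounds the wait at 20 * 2us = 40us; the channel
+ * is marked paused even if the FIFO never reports empty in that time.
+ */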
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+	dwc->paused = false;
+}
+
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		       unsigned long arg)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc, *_desc;
+	unsigned long		flags;
+	LIST_HEAD(list);
+
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		dwc_chan_pause(dwc);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		dwc_chan_resume(dwc);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
+		dwc_chan_disable(dw, dwc);
+
+		dwc_chan_resume(dwc);
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else if (cmd == DMA_SLAVE_CONFIG) {
+		return set_runtime_config(chan, (struct dma_slave_config *)arg);
+	} else {
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+	unsigned long flags;
+	u32 residue;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	residue = dwc->residue;
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+		residue -= dwc_get_sent(dwc);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return residue;
+}
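+/*
+ * dwc->residue is maintained in whole-descriptor granularity by the
+ * scan/interrupt paths; in soft LLP mode the bytes already moved by
+ * the block in flight are subtracted here, so the reported value
+ * stays byte-accurate between block interrupts.
+ */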
+
+static enum dma_status
+dwc_tx_status(struct dma_chan *chan,
+	      dma_cookie_t cookie,
+	      struct dma_tx_state *txstate)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	enum dma_status		ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS) {
+		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+
+		ret = dma_cookie_status(chan, cookie, txstate);
+	}
+
+	if (ret != DMA_SUCCESS)
+		dma_set_residue(txstate, dwc_get_residue(dwc));
+
+	if (dwc->paused)
+		return DMA_PAUSED;
+
+	return ret;
+}
+
+static void dwc_issue_pending(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+	if (!list_empty(&dwc->queue))
+		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+}
+
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc;
+	int			i;
+	unsigned long		flags;
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	/* ASSERT: channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+		return -EIO;
+	}
+
+	dma_cookie_init(chan);
+
+	/*
+	 * NOTE: some controllers may have additional features that we
+	 * need to initialize here, like "scatter-gather" (which
+	 * doesn't mean what you think it means), and status writeback.
+	 */
+
+	dwc_set_masters(dwc);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	i = dwc->descs_allocated;
+	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+		dma_addr_t phys;
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+		if (!desc)
+			goto err_desc_alloc;
+
+		memset(desc, 0, sizeof(struct dw_desc));
+
+		INIT_LIST_HEAD(&desc->tx_list);
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		desc->txd.tx_submit = dwc_tx_submit;
+		desc->txd.flags = DMA_CTRL_ACK;
+		desc->txd.phys = phys;
+
+		dwc_desc_put(dwc, desc);
+
+		spin_lock_irqsave(&dwc->lock, flags);
+		i = ++dwc->descs_allocated;
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+	return i;
+
+err_desc_alloc:
+	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+	return i;
+}
+
+static void dwc_free_chan_resources(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc, *_desc;
+	unsigned long		flags;
+	LIST_HEAD(list);
+
+	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
+			dwc->descs_allocated);
+
+	/* ASSERT: channel is idle */
+	BUG_ON(!list_empty(&dwc->active_list));
+	BUG_ON(!list_empty(&dwc->queue));
+	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	list_splice_init(&dwc->free_list, &list);
+	dwc->descs_allocated = 0;
+	dwc->initialized = false;
+	dwc->request_line = ~0;
+
+	/* Disable interrupts */
+	channel_clear_bit(dw, MASK.XFER, dwc->mask);
+	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+	}
+
+	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
+}
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	unsigned long		flags;
+
+	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Assert channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dwc_dump_chan_regs(dwc);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EBUSY;
+	}
+
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	/* Setup DMAC channel registers */
+	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+
+	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	dwc_chan_disable(dw, dwc);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_transfer_direction direction)
+{
+	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
+	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
+	struct dw_cyclic_desc		*cdesc;
+	struct dw_cyclic_desc		*retval = NULL;
+	struct dw_desc			*desc;
+	struct dw_desc			*last = NULL;
+	unsigned long			was_cyclic;
+	unsigned int			reg_width;
+	unsigned int			periods;
+	unsigned int			i;
+	unsigned long			flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->nollp) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel doesn't support LLP transfers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dev_dbg(chan2dev(&dwc->chan),
+				"queue and/or active list are not empty\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel already prepared for cyclic DMA\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	retval = ERR_PTR(-EINVAL);
+
+	if (unlikely(!is_slave_direction(direction)))
+		goto out_err;
+
+	dwc->direction = direction;
+
+	if (direction == DMA_MEM_TO_DEV)
+		reg_width = __ffs(sconfig->dst_addr_width);
+	else
+		reg_width = __ffs(sconfig->src_addr_width);
+
+	periods = buf_len / period_len;
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer. */
+	if (period_len > (dwc->block_size << reg_width))
+		goto out_err;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto out_err;
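+	/*
+	 * With 32-bit register accesses (reg_width = 2) this means each
+	 * period must be a multiple of 4 bytes, the buffer must start
+	 * 4-byte aligned, and a period must fit in block_size * 4 bytes.
+	 */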
+
+	retval = ERR_PTR(-ENOMEM);
+
+	if (periods > NR_DESCS_PER_CHANNEL)
+		goto out_err;
+
+	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+	if (!cdesc)
+		goto out_err;
+
+	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+	if (!cdesc->desc)
+		goto out_err_alloc;
+
+	for (i = 0; i < periods; i++) {
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto out_err_desc_get;
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->lli.dar = sconfig->dst_addr;
+			desc->lli.sar = buf_addr + (period_len * i);
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_FIX
+					| DWC_CTLL_SRC_INC
+					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->lli.dar = buf_addr + (period_len * i);
+			desc->lli.sar = sconfig->src_addr;
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_DST_INC
+					| DWC_CTLL_SRC_FIX
+					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
+			break;
+		default:
+			break;
+		}
+
+		desc->lli.ctlhi = (period_len >> reg_width);
+		cdesc->desc[i] = desc;
+
+		if (last)
+			last->lli.llp = desc->txd.phys;
+
+		last = desc;
+	}
+
+	/* Let's make a cyclic list */
+	last->lli.llp = cdesc->desc[0]->txd.phys;
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+			"period %zu periods %d\n", (unsigned long long)buf_addr,
+			buf_len, period_len, periods);
+
+	cdesc->periods = periods;
+	dwc->cdesc = cdesc;
+
+	return cdesc;
+
+out_err_desc_get:
+	while (i--)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+	kfree(cdesc);
+out_err:
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
+	int			i;
+	unsigned long		flags;
+
+	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
+
+	if (!cdesc)
+		return;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	dwc_chan_disable(dw, dwc);
+
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	for (i = 0; i < cdesc->periods; i++)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+
+	kfree(cdesc->desc);
+	kfree(cdesc);
+
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+	int i;
+
+	dma_writel(dw, CFG, 0);
+
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+		cpu_relax();
+
+	for (i = 0; i < dw->dma.chancnt; i++)
+		dw->chan[i].initialized = false;
+}
+
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+{
+	struct dw_dma		*dw;
+	size_t			size;
+	bool			autocfg;
+	unsigned int		dw_params;
+	unsigned int		nr_channels;
+	unsigned int		max_blk_size = 0;
+	int			err;
+	int			i;
+
+	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
+	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+
+	dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+	if (!pdata && autocfg) {
+		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		/* Fill platform data with the default values */
+		pdata->is_private = true;
+		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return -EINVAL;
+
+	if (autocfg)
+		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+	else
+		nr_channels = pdata->nr_channels;
+
+	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+	dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	dw->clk = devm_clk_get(chip->dev, "hclk");
+	if (IS_ERR(dw->clk))
+		return PTR_ERR(dw->clk);
+	clk_prepare_enable(dw->clk);
+
+	dw->regs = chip->regs;
+	chip->dw = dw;
+
+	/* Get hardware configuration parameters */
+	if (autocfg) {
+		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+
+		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+		for (i = 0; i < dw->nr_masters; i++) {
+			dw->data_width[i] =
+				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+		}
+	} else {
+		dw->nr_masters = pdata->nr_masters;
+		memcpy(dw->data_width, pdata->data_width, 4);
+	}
+
+	/* Calculate all channel mask before DMA setup */
+	dw->all_chan_mask = (1 << nr_channels) - 1;
+
+	/* Force dma off, just in case */
+	dw_dma_off(dw);
+
+	/* Disable BLOCK interrupts as well */
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+
+	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
+			       "dw_dmac", dw);
+	if (err)
+		return err;
+
+	/* Create a pool of consistent memory blocks for hardware descriptors */
+	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+					 sizeof(struct dw_desc), 4, 0);
+	if (!dw->desc_pool) {
+		dev_err(chip->dev, "No memory for descriptors dma pool\n");
+		return -ENOMEM;
+	}
+
+	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+
+	INIT_LIST_HEAD(&dw->dma.channels);
+	for (i = 0; i < nr_channels; i++) {
+		struct dw_dma_chan	*dwc = &dw->chan[i];
+		int			r = nr_channels - i - 1;
+
+		dwc->chan.device = &dw->dma;
+		dma_cookie_init(&dwc->chan);
+		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+			list_add_tail(&dwc->chan.device_node,
+					&dw->dma.channels);
+		else
+			list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+		/* 7 is highest priority & 0 is lowest. */
+		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+			dwc->priority = r;
+		else
+			dwc->priority = i;
+
+		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+		spin_lock_init(&dwc->lock);
+		dwc->mask = 1 << i;
+
+		INIT_LIST_HEAD(&dwc->active_list);
+		INIT_LIST_HEAD(&dwc->queue);
+		INIT_LIST_HEAD(&dwc->free_list);
+
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+
+		dwc->direction = DMA_TRANS_NONE;
+		dwc->request_line = ~0;
+
+		/* Hardware configuration */
+		if (autocfg) {
+			unsigned int dwc_params;
+			void __iomem *addr = chip->regs + r * sizeof(u32);
+
+			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+
+			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+					dwc_params);
+
+			/* Decode maximum block size for given channel. The
+			 * stored 4 bit value represents blocks from 0x00 for 3
+			 * up to 0x0a for 4095. */
+			dwc->block_size =
+				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+			dwc->nollp =
+				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
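+			/*
+			 * E.g. a stored 0x0 decodes to (4 << 0) - 1 = 3
+			 * and 0xa to (4 << 10) - 1 = 4095 transfers;
+			 * MBLK_EN clear means the channel has no
+			 * hardware LLP (multi block) support.
+			 */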
+		} else {
+			dwc->block_size = pdata->block_size;
+
+			/* Check if channel supports multi block transfer */
+			channel_writel(dwc, LLP, 0xfffffffc);
+			dwc->nollp =
+				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+			channel_writel(dwc, LLP, 0);
+		}
+	}
+
+	/* Clear all interrupts on all channels. */
+	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+
+	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+	if (pdata->is_private)
+		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+	dw->dma.dev = chip->dev;
+	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+
+	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+
+	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+	dw->dma.device_control = dwc_control;
+
+	dw->dma.device_tx_status = dwc_tx_status;
+	dw->dma.device_issue_pending = dwc_issue_pending;
+
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
+		 nr_channels);
+
+	dma_async_device_register(&dw->dma);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_dma_probe);
+
+int dw_dma_remove(struct dw_dma_chip *chip)
+{
+	struct dw_dma *dw = chip->dw;
+	struct dw_dma_chan *dwc, *_dwc;
+
+	dw_dma_off(dw);
+	dma_async_device_unregister(&dw->dma);
+
+	tasklet_kill(&dw->tasklet);
+
+	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+			chan.device_node) {
+		list_del(&dwc->chan.device_node);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_dma_remove);
+
+void dw_dma_shutdown(struct dw_dma_chip *chip)
+{
+	struct dw_dma *dw = chip->dw;
+
+	dw_dma_off(dw);
+	clk_disable_unprepare(dw->clk);
+}
+EXPORT_SYMBOL_GPL(dw_dma_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
+
+int dw_dma_suspend(struct dw_dma_chip *chip)
+{
+	struct dw_dma *dw = chip->dw;
+
+	dw_dma_off(dw);
+	clk_disable_unprepare(dw->clk);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_dma_suspend);
+
+int dw_dma_resume(struct dw_dma_chip *chip)
+{
+	struct dw_dma *dw = chip->dw;
+
+	clk_prepare_enable(dw->clk);
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_dma_resume);
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff -urN linux-3.0.101/drivers/dma/dw/internal.h linux-3.0.101.xm510/drivers/dma/dw/internal.h
--- linux-3.0.101/drivers/dma/dw/internal.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/internal.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,70 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DW_DMAC_INTERNAL_H
+#define _DW_DMAC_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/dw_dmac.h>
+
+#include "regs.h"
+
+/**
+ * struct dw_dma_chip - representation of DesignWare DMA controller hardware
+ * @dev:	struct device of the DMA controller
+ * @irq:	irq line
+ * @regs:	memory mapped I/O space
+ * @dw:		struct dw_dma that is filled by dw_dma_probe()
+ */
+struct dw_dma_chip {
+	struct device	*dev;
+	int		irq;
+	void __iomem	*regs;
+	struct dw_dma	*dw;
+};
+
+/* Export to the platform drivers */
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_remove(struct dw_dma_chip *chip);
+
+void dw_dma_shutdown(struct dw_dma_chip *chip);
+
+#ifdef CONFIG_PM_SLEEP
+
+int dw_dma_suspend(struct dw_dma_chip *chip);
+int dw_dma_resume(struct dw_dma_chip *chip);
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * dwc_get_dms - get destination master
+ * @slave:	pointer to the custom slave configuration
+ *
+ * Returns destination master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->dst_master : 0;
+}
+
+/**
+ * dwc_get_sms - get source master
+ * @slave:	pointer to the custom slave configuration
+ *
+ * Returns source master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->src_master : 1;
+}
+
+#endif /* _DW_DMAC_INTERNAL_H */
diff -urN linux-3.0.101/drivers/dma/dw/Kconfig linux-3.0.101.xm510/drivers/dma/dw/Kconfig
--- linux-3.0.101/drivers/dma/dw/Kconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,29 @@
+#
+# DMA engine configuration for dw
+#
+
+config DW_DMAC_CORE
+	tristate "Synopsys DesignWare AHB DMA support"
+	depends on GENERIC_HARDIRQS
+	select DMA_ENGINE
+
+config DW_DMAC
+	tristate "Synopsys DesignWare AHB DMA platform driver"
+	select DW_DMAC_CORE
+	select DW_DMAC_BIG_ENDIAN_IO if AVR32
+	default y if CPU_AT32AP7000
+	help
+	  Support the Synopsys DesignWare AHB DMA controller. This
+	  can be integrated in chips such as the Atmel AT32ap7000.
+
+config DW_DMAC_PCI
+	tristate "Synopsys DesignWare AHB DMA PCI driver"
+	depends on PCI
+	select DW_DMAC_CORE
+	help
+	  Support the Synopsys DesignWare AHB DMA controller on the
+	  platforms that enumerate it as a PCI device. For example,
+	  Intel Medfield has integrated this GPDMA controller.
+
+config DW_DMAC_BIG_ENDIAN_IO
+	bool
diff -urN linux-3.0.101/drivers/dma/dw/Makefile linux-3.0.101.xm510/drivers/dma/dw/Makefile
--- linux-3.0.101/drivers/dma/dw/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,8 @@
+obj-$(CONFIG_DW_DMAC_CORE)	+= dw_dmac_core.o
+dw_dmac_core-objs	:= core.o
+
+obj-$(CONFIG_DW_DMAC)		+= dw_dmac.o
+dw_dmac-objs	:= platform.o
+
+obj-$(CONFIG_DW_DMAC_PCI)	+= dw_dmac_pci.o
+dw_dmac_pci-objs	:= pci.o
diff -urN linux-3.0.101/drivers/dma/dw/pci.c linux-3.0.101.xm510/drivers/dma/dw/pci.c
--- linux-3.0.101/drivers/dma/dw/pci.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/pci.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,101 @@
+/*
+ * PCI driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+
+#include "internal.h"
+
+static struct dw_dma_platform_data dw_pci_pdata = {
+	.is_private = 1,
+	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+	.chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct dw_dma_chip *chip;
+	struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+	if (ret) {
+		dev_err(&pdev->dev, "I/O memory remapping failed\n");
+		return ret;
+	}
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->regs = pcim_iomap_table(pdev)[0];
+	chip->irq = pdev->irq;
+
+	ret = dw_dma_probe(chip, pdata);
+	if (ret)
+		return ret;
+
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+}
+
+static void dw_pci_remove(struct pci_dev *pdev)
+{
+	struct dw_dma_chip *chip = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = dw_dma_remove(chip);
+	if (ret)
+		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+	/* Medfield */
+	{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
+	{ PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
+
+	/* BayTrail */
+	{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
+	{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
+
+static struct pci_driver dw_pci_driver = {
+	.name		= "dw_dmac_pci",
+	.id_table	= dw_pci_id_table,
+	.probe		= dw_pci_probe,
+	.remove		= dw_pci_remove,
+};
+
+module_pci_driver(dw_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
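Annotation (not part of the patch, ignored by patch tools): dw_pci_probe() above never hard-codes per-device configuration; each pci_device_id entry carries a pointer to dw_pci_pdata in its driver_data slot, and probe casts it back to the typed structure. A minimal stand-alone C sketch of that round-trip, using hypothetical my_* names in place of the kernel types:

#include <stdio.h>

struct my_pdata { int is_private; };

struct my_device_id {
	unsigned short vendor, device;
	unsigned long driver_data;	/* opaque per-entry payload */
};

static const struct my_pdata my_pci_pdata = { .is_private = 1 };

static const struct my_device_id my_id_table[] = {
	{ 0x8086, 0x0827, (unsigned long)&my_pci_pdata },	/* Medfield */
	{ 0 }
};

static void my_probe(const struct my_device_id *id)
{
	/* recover the typed platform data from the opaque payload */
	const struct my_pdata *pdata = (const struct my_pdata *)id->driver_data;

	printf("is_private = %d\n", pdata->is_private);
}

int main(void)
{
	my_probe(&my_id_table[0]);
	return 0;
}

One table entry per PCI ID keeps the probe path generic: adding support for another chip is a new table line, not new probe logic.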
diff -urN linux-3.0.101/drivers/dma/dw/platform.c linux-3.0.101.xm510/drivers/dma/dw/platform.c
--- linux-3.0.101/drivers/dma/dw/platform.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/platform.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,317 @@
+/*
+ * Platform driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Some parts of this driver are derived from the original dw_dmac.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+#include "internal.h"
+
+struct dw_dma_of_filter_args {
+	struct dw_dma *dw;
+	unsigned int req;
+	unsigned int src;
+	unsigned int dst;
+};
+
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma_of_filter_args *fargs = param;
+
+	/* Ensure the device matches our channel */
+	if (chan->device != &fargs->dw->dma)
+		return false;
+
+	dwc->request_line = fargs->req;
+	dwc->src_master = fargs->src;
+	dwc->dst_master = fargs->dst;
+
+	return true;
+}
+
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct dw_dma *dw = ofdma->of_dma_data;
+	struct dw_dma_of_filter_args fargs = {
+		.dw = dw,
+	};
+	dma_cap_mask_t cap;
+
+	if (dma_spec->args_count != 3)
+		return NULL;
+
+	fargs.req = dma_spec->args[0];
+	fargs.src = dma_spec->args[1];
+	fargs.dst = dma_spec->args[2];
+
+	if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+		    fargs.src >= dw->nr_masters ||
+		    fargs.dst >= dw->nr_masters))
+		return NULL;
+
+	dma_cap_zero(cap);
+	dma_cap_set(DMA_SLAVE, cap);
+
+	/* TODO: there should be a simpler way to do this */
+	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+}
+
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct acpi_dma_spec *dma_spec = param;
+
+	if (chan->device->dev != dma_spec->dev ||
+	    chan->chan_id != dma_spec->chan_id)
+		return false;
+
+	dwc->request_line = dma_spec->slave_id;
+	dwc->src_master = dwc_get_sms(NULL);
+	dwc->dst_master = dwc_get_dms(NULL);
+
+	return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+	struct device *dev = dw->dma.dev;
+	struct acpi_dma_filter_info *info;
+	int ret;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return;
+
+	dma_cap_zero(info->dma_cap);
+	dma_cap_set(DMA_SLAVE, info->dma_cap);
+	info->filter_fn = dw_dma_acpi_filter;
+
+	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+						info);
+	if (ret)
+		dev_err(dev, "could not register acpi_dma_controller\n");
+}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct dw_dma_platform_data *pdata;
+	u32 tmp, arr[4];
+
+	if (!np) {
+		dev_err(&pdev->dev, "Missing DT data\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+		return NULL;
+
+	if (of_property_read_bool(np, "is_private"))
+		pdata->is_private = true;
+
+	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+		pdata->chan_allocation_order = (unsigned char)tmp;
+
+	if (!of_property_read_u32(np, "chan_priority", &tmp))
+		pdata->chan_priority = tmp;
+
+	if (!of_property_read_u32(np, "block_size", &tmp))
+		pdata->block_size = tmp;
+
+	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+		if (tmp > 4)
+			return NULL;
+
+		pdata->nr_masters = tmp;
+	}
+
+	if (!of_property_read_u32_array(np, "data_width", arr,
+					pdata->nr_masters))
+		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+			pdata->data_width[tmp] = arr[tmp];
+
+	return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
+static int dw_probe(struct platform_device *pdev)
+{
+	struct dw_dma_chip *chip;
+	struct device *dev = &pdev->dev;
+	struct resource *mem;
+	struct dw_dma_platform_data *pdata;
+	int err;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->irq = platform_get_irq(pdev, 0);
+	if (chip->irq < 0)
+		return chip->irq;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(chip->regs))
+		return PTR_ERR(chip->regs);
+
+	/* Apply default dma_mask if needed */
+	if (!dev->dma_mask) {
+		dev->dma_mask = &dev->coherent_dma_mask;
+		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	pdata = dev_get_platdata(dev);
+	if (!pdata)
+		pdata = dw_dma_parse_dt(pdev);
+
+	chip->dev = dev;
+
+	err = dw_dma_probe(chip, pdata);
+	if (err)
+		return err;
+
+	platform_set_drvdata(pdev, chip);
+
+	if (pdev->dev.of_node) {
+		err = of_dma_controller_register(pdev->dev.of_node,
+						 dw_dma_of_xlate, chip->dw);
+		if (err)
+			dev_err(&pdev->dev,
+				"could not register of_dma_controller\n");
+	}
+
+	if (ACPI_HANDLE(&pdev->dev))
+		dw_dma_acpi_controller_register(chip->dw);
+
+	return 0;
+}
+
+static int dw_remove(struct platform_device *pdev)
+{
+	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return dw_dma_remove(chip);
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+	dw_dma_shutdown(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_of_id_table[] = {
+	{ .compatible = "snps,dma-spear1340" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+	{ "INTL9C60", 0 },
+	{ }
+};
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+	return dw_dma_suspend(chip);
+}
+
+static int dw_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+	return dw_dma_resume(chip);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_suspend_noirq	NULL
+#define dw_resume_noirq		NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_dev_pm_ops = {
+	.suspend_noirq = dw_suspend_noirq,
+	.resume_noirq = dw_resume_noirq,
+	.freeze_noirq = dw_suspend_noirq,
+	.thaw_noirq = dw_resume_noirq,
+	.restore_noirq = dw_resume_noirq,
+	.poweroff_noirq = dw_suspend_noirq,
+};
+
+static struct platform_driver dw_driver = {
+	.probe		= dw_probe,
+	.remove		= dw_remove,
+	.shutdown	= dw_shutdown,
+	.driver = {
+		.name	= "dw_dmac",
+		.pm	= &dw_dev_pm_ops,
+		.of_match_table = of_match_ptr(dw_dma_of_id_table),
+		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+	},
+};
+
+static int __init dw_init(void)
+{
+	return platform_driver_register(&dw_driver);
+}
+subsys_initcall(dw_init);
+
+static void __exit dw_exit(void)
+{
+	platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
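Annotation (not part of the patch, ignored by patch tools): dw_dma_of_xlate() above expects a three-cell DT specifier (request line, source master, destination master), bounds-checks the two master indices against nr_masters, and hands the values to the filter, which stores them in the matched channel. A stand-alone C model of that unpacking, with hypothetical names; the real code operates on of_phandle_args rather than a plain array:

#include <stdio.h>

struct chan { unsigned req, src, dst; };

static int xlate(const unsigned args[], unsigned count, struct chan *c,
		 unsigned nr_masters)
{
	if (count != 3)				/* requires #dma-cells = <3> */
		return -1;
	if (args[1] >= nr_masters || args[2] >= nr_masters)
		return -1;			/* bounds-check the masters */
	c->req = args[0];
	c->src = args[1];
	c->dst = args[2];
	return 0;
}

int main(void)
{
	struct chan c;
	const unsigned spec[3] = { 5, 0, 1 };	/* e.g. dmas = <&dw 5 0 1>; */

	if (!xlate(spec, 3, &c, 2))
		printf("req=%u src=%u dst=%u\n", c.req, c.src, c.dst);
	return 0;
}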
diff -urN linux-3.0.101/drivers/dma/dw/regs.h linux-3.0.101.xm510/drivers/dma/dw/regs.h
--- linux-3.0.101/drivers/dma/dw/regs.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/dw/regs.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,318 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/dw_dmac.h>
+
+#define DW_DMA_MAX_NR_CHANNELS	8
+#define DW_DMA_MAX_NR_REQUESTS	16
+
+/* flow controller */
+enum dw_dma_fc {
+	DW_DMA_FC_D_M2M,
+	DW_DMA_FC_D_M2P,
+	DW_DMA_FC_D_P2M,
+	DW_DMA_FC_D_P2P,
+	DW_DMA_FC_P_P2M,
+	DW_DMA_FC_SP_P2P,
+	DW_DMA_FC_P_M2P,
+	DW_DMA_FC_DP_P2P,
+};
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define DW_REG(name)	u32 name; u32 __pad_##name
+
+/* Hardware register definitions. */
+struct dw_dma_chan_regs {
+	DW_REG(SAR);		/* Source Address Register */
+	DW_REG(DAR);		/* Destination Address Register */
+	DW_REG(LLP);		/* Linked List Pointer */
+	u32	CTL_LO;		/* Control Register Low */
+	u32	CTL_HI;		/* Control Register High */
+	DW_REG(SSTAT);
+	DW_REG(DSTAT);
+	DW_REG(SSTATAR);
+	DW_REG(DSTATAR);
+	u32	CFG_LO;		/* Configuration Register Low */
+	u32	CFG_HI;		/* Configuration Register High */
+	DW_REG(SGR);
+	DW_REG(DSR);
+};
+
+struct dw_dma_irq_regs {
+	DW_REG(XFER);
+	DW_REG(BLOCK);
+	DW_REG(SRC_TRAN);
+	DW_REG(DST_TRAN);
+	DW_REG(ERROR);
+};
+
+struct dw_dma_regs {
+	/* per-channel registers */
+	struct dw_dma_chan_regs	CHAN[DW_DMA_MAX_NR_CHANNELS];
+
+	/* irq handling */
+	struct dw_dma_irq_regs	RAW;		/* r */
+	struct dw_dma_irq_regs	STATUS;		/* r (raw & mask) */
+	struct dw_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
+	struct dw_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
+
+	DW_REG(STATUS_INT);			/* r */
+
+	/* software handshaking */
+	DW_REG(REQ_SRC);
+	DW_REG(REQ_DST);
+	DW_REG(SGL_REQ_SRC);
+	DW_REG(SGL_REQ_DST);
+	DW_REG(LAST_SRC);
+	DW_REG(LAST_DST);
+
+	/* miscellaneous */
+	DW_REG(CFG);
+	DW_REG(CH_EN);
+	DW_REG(ID);
+	DW_REG(TEST);
+
+	/* reserved */
+	DW_REG(__reserved0);
+	DW_REG(__reserved1);
+
+	/* optional encoded params, 0x3c8..0x3f7 */
+	u32	__reserved;
+
+	/* per-channel configuration registers */
+	u32	DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+	u32	MULTI_BLK_TYPE;
+	u32	MAX_BLK_SIZE;
+
+	/* top-level parameters */
+	u32	DW_PARAMS;
+};
+
+/*
+ * Big endian I/O access when reading and writing to the DMA controller
+ * registers. This is needed on some platforms, like the Atmel AVR32
+ * architecture.
+ */
+
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
+/* To access the registers in early stage of probe */
+#define dma_read_byaddr(addr, name) \
+	dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
+
+/* Bitfields in DW_PARAMS */
+#define DW_PARAMS_NR_CHAN	8		/* number of channels */
+#define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
+#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
+#define DW_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
+#define DW_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
+#define DW_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
+#define DW_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
+#define DW_PARAMS_EN		28		/* encoded parameters */
+
+/* Bitfields in DWC_PARAMS */
+#define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
+
+/* Bitfields in CTL_LO */
+#define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
+#define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
+#define DWC_CTLL_SRC_WIDTH(n)	((n)<<4)
+#define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
+#define DWC_CTLL_DST_DEC	(1<<7)
+#define DWC_CTLL_DST_FIX	(2<<7)
+#define DWC_CTLL_SRC_INC	(0<<7)		/* SAR update/not */
+#define DWC_CTLL_SRC_DEC	(1<<9)
+#define DWC_CTLL_SRC_FIX	(2<<9)
+#define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
+#define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
+#define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
+#define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
+#define DWC_CTLL_FC(n)		((n) << 20)
+#define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
+#define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+#define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+#define DWC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define DWC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
+#define DWC_CTLL_SMS(n)		((n)<<25)	/* src master select */
+#define DWC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
+#define DWC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
+
+/* Bitfields in CTL_HI */
+#define DWC_CTLH_DONE		0x00001000
+#define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
+
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
+#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
+#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
+#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
+#define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
+#define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
+#define DWC_CFGL_MAX_BURST(x)	((x) << 20)
+#define DWC_CFGL_RELOAD_SAR	(1 << 30)
+#define DWC_CFGL_RELOAD_DAR	(1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGH_DS_UPD_EN	(1 << 5)
+#define DWC_CFGH_SS_UPD_EN	(1 << 6)
+
+/* Bitfields in SGR */
+#define DWC_SGR_SGI(x)		((x) << 0)
+#define DWC_SGR_SGC(x)		((x) << 20)
+
+/* Bitfields in DSR */
+#define DWC_DSR_DSI(x)		((x) << 0)
+#define DWC_DSR_DSC(x)		((x) << 20)
+
+/* Bitfields in CFG */
+#define DW_CFG_DMA_EN		(1 << 0)
+
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+	DW_DMA_IS_SOFT_LLP = 1,
+};
+
+struct dw_dma_chan {
+	struct dma_chan			chan;
+	void __iomem			*ch_regs;
+	u8				mask;
+	u8				priority;
+	enum dma_transfer_direction	direction;
+	bool				paused;
+	bool				initialized;
+
+	/* software emulation of the LLP transfers */
+	struct list_head	*tx_node_active;
+
+	spinlock_t		lock;
+
+	/* these other elements are all protected by lock */
+	unsigned long		flags;
+	struct list_head	active_list;
+	struct list_head	queue;
+	struct list_head	free_list;
+	u32			residue;
+	struct dw_cyclic_desc	*cdesc;
+
+	unsigned int		descs_allocated;
+
+	/* hardware configuration */
+	unsigned int		block_size;
+	bool			nollp;
+
+	/* custom slave configuration */
+	unsigned int		request_line;
+	unsigned char		src_master;
+	unsigned char		dst_master;
+
+	/* configuration passed via DMA_SLAVE_CONFIG */
+	struct dma_slave_config dma_sconfig;
+};
+
+static inline struct dw_dma_chan_regs __iomem *
+__dwc_regs(struct dw_dma_chan *dwc)
+{
+	return dwc->ch_regs;
+}
+
+#define channel_readl(dwc, name) \
+	dma_readl_native(&(__dwc_regs(dwc)->name))
+#define channel_writel(dwc, name, val) \
+	dma_writel_native((val), &(__dwc_regs(dwc)->name))
+
+static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct dw_dma_chan, chan);
+}
+
+struct dw_dma {
+	struct dma_device	dma;
+	void __iomem		*regs;
+	struct dma_pool		*desc_pool;
+	struct tasklet_struct	tasklet;
+	struct clk		*clk;
+
+	u8			all_chan_mask;
+
+	/* hardware configuration */
+	unsigned char		nr_masters;
+	unsigned char		data_width[4];
+
+	struct dw_dma_chan	chan[0];
+};
+
+static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+{
+	return dw->regs;
+}
+
+#define dma_readl(dw, name) \
+	dma_readl_native(&(__dw_regs(dw)->name))
+#define dma_writel(dw, name, val) \
+	dma_writel_native((val), &(__dw_regs(dw)->name))
+
+#define channel_set_bit(dw, reg, mask) \
+	dma_writel(dw, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(dw, reg, mask) \
+	dma_writel(dw, reg, ((mask) << 8) | 0)
+
+static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+{
+	return container_of(ddev, struct dw_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct dw_lli {
+	/* values that are not changed by hardware */
+	u32		sar;
+	u32		dar;
+	u32		llp;		/* chain to next lli */
+	u32		ctllo;
+	/* values that may get written back: */
+	u32		ctlhi;
+	/* sstat and dstat can snapshot peripheral register state.
+	 * silicon config may discard either or both...
+	 */
+	u32		sstat;
+	u32		dstat;
+};
+
+struct dw_desc {
+	/* FIRST values the hardware uses */
+	struct dw_lli			lli;
+
+	/* THEN values for driver housekeeping */
+	struct list_head		desc_node;
+	struct list_head		tx_list;
+	struct dma_async_tx_descriptor	txd;
+	size_t				len;
+	size_t				total_len;
+};
+
+#define to_dw_desc(h)	list_entry(h, struct dw_desc, desc_node)
+
+static inline struct dw_desc *
+txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct dw_desc, txd);
+}
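Annotation (not part of the patch, ignored by patch tools): channel_set_bit()/channel_clear_bit() above exploit the write-enable layout of registers such as CH_EN and the interrupt MASK set: bits [15:8] of the written word select which of bits [7:0] actually change, so a single write of ((mask) << 8) | (mask) sets a channel's bit, and ((mask) << 8) | 0 clears it, without a read-modify-write cycle that could race with other channels. A stand-alone C model of that semantics, inferred from the macro definitions above:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;	/* models one write-enable style register */

static void reg_write(uint32_t val)
{
	uint32_t we = (val >> 8) & 0xff;	/* write-enable byte */

	reg = (reg & ~we) | (val & we);		/* only enabled bits change */
}

int main(void)
{
	reg_write(((1u << 2) << 8) | (1u << 2));	/* set channel 2 */
	reg_write(((1u << 0) << 8) | (1u << 0));	/* set channel 0 */
	reg_write(((1u << 2) << 8) | 0);		/* clear only channel 2 */
	printf("CH_EN = 0x%02x\n", reg);		/* prints 0x01 */
	return 0;
}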
diff -urN linux-3.0.101/drivers/dma/edma.c linux-3.0.101.xm510/drivers/dma/edma.c
--- linux-3.0.101/drivers/dma/edma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/edma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,672 @@
+/*
+ * TI EDMA DMA engine driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <mach/edma.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * This will go away when the private EDMA API is folded
+ * into this driver and the platform device(s) are
+ * instantiated in the arch code. We can only get away
+ * with this simplification because DA8XX may not be built
+ * in the same kernel image with other DaVinci parts. This
+ * avoids having to sprinkle dmaengine driver platform devices
+ * and data throughout all the existing board files.
+ */
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#define EDMA_CTLRS	2
+#define EDMA_CHANS	32
+#else
+#define EDMA_CTLRS	1
+#define EDMA_CHANS	64
+#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
+
+/* Max of 16 segments per channel to conserve PaRAM slots */
+#define MAX_NR_SG	16
+#define EDMA_MAX_SLOTS	MAX_NR_SG
+#define EDMA_DESCRIPTORS	16
+
+struct edma_desc {
+	struct virt_dma_desc	vdesc;
+	struct list_head	node;
+	int			absync;
+	int			pset_nr;
+	struct edmacc_param	pset[0];
+};
+
+struct edma_cc;
+
+struct edma_chan {
+	struct virt_dma_chan	vchan;
+	struct list_head	node;
+	struct edma_desc	*edesc;
+	struct edma_cc		*ecc;
+	int			ch_num;
+	bool			alloced;
+	int			slot[EDMA_MAX_SLOTS];
+	struct dma_slave_config	cfg;
+};
+
+struct edma_cc {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct edma_chan	slave_chans[EDMA_CHANS];
+	int			num_slave_chans;
+	int			dummy_slot;
+};
+
+static inline struct edma_cc *to_edma_cc(struct dma_device *d)
+{
+	return container_of(d, struct edma_cc, dma_slave);
+}
+
+static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct edma_chan, vchan.chan);
+}
+
+static inline struct edma_desc
+*to_edma_desc(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct edma_desc, vdesc.tx);
+}
+
+static void edma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct edma_desc, vdesc));
+}
+
+/* Dispatch a queued descriptor to the controller (caller holds lock) */
+static void edma_execute(struct edma_chan *echan)
+{
+	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+	struct edma_desc *edesc;
+	int i;
+
+	if (!vdesc) {
+		echan->edesc = NULL;
+		return;
+	}
+
+	list_del(&vdesc->node);
+
+	echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+
+	/* Write descriptor PaRAM set(s) */
+	for (i = 0; i < edesc->pset_nr; i++) {
+		edma_write_slot(echan->slot[i], &edesc->pset[i]);
+		dev_dbg(echan->vchan.chan.device->dev,
+			"\n pset[%d]:\n"
+			"  chnum\t%d\n"
+			"  slot\t%d\n"
+			"  opt\t%08x\n"
+			"  src\t%08x\n"
+			"  dst\t%08x\n"
+			"  abcnt\t%08x\n"
+			"  ccnt\t%08x\n"
+			"  bidx\t%08x\n"
+			"  cidx\t%08x\n"
+			"  lkrld\t%08x\n",
+			i, echan->ch_num, echan->slot[i],
+			edesc->pset[i].opt,
+			edesc->pset[i].src,
+			edesc->pset[i].dst,
+			edesc->pset[i].a_b_cnt,
+			edesc->pset[i].ccnt,
+			edesc->pset[i].src_dst_bidx,
+			edesc->pset[i].src_dst_cidx,
+			edesc->pset[i].link_bcntrld);
+		/* Link to the previous slot if not the last set */
+		if (i != (edesc->pset_nr - 1))
+			edma_link(echan->slot[i], echan->slot[i+1]);
+		/* Final pset links to the dummy pset */
+		else
+			edma_link(echan->slot[i], echan->ecc->dummy_slot);
+	}
+
+	edma_start(echan->ch_num);
+}
+
+static int edma_terminate_all(struct edma_chan *echan)
+{
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&echan->vchan.lock, flags);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after edma_dma() returns (even if it does, it will see
+	 * echan->edesc is NULL and exit.)
+	 */
+	if (echan->edesc) {
+		echan->edesc = NULL;
+		edma_stop(echan->ch_num);
+	}
+
+	vchan_get_all_descriptors(&echan->vchan, &head);
+	spin_unlock_irqrestore(&echan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&echan->vchan, &head);
+
+	return 0;
+}
+
+static int edma_slave_config(struct edma_chan *echan,
+	struct dma_slave_config *cfg)
+{
+	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
+
+	return 0;
+}
+
+static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+	struct edma_chan *echan = to_edma_chan(chan);
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		edma_terminate_all(echan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = edma_slave_config(echan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *edma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long tx_flags, void *context)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	struct edma_desc *edesc;
+	dma_addr_t dev_addr;
+	enum dma_slave_buswidth dev_width;
+	u32 burst;
+	struct scatterlist *sg;
+	int i;
+	int acnt, bcnt, ccnt, src, dst, cidx;
+	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+
+	if (unlikely(!echan || !sgl || !sg_len))
+		return NULL;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = echan->cfg.src_addr;
+		dev_width = echan->cfg.src_addr_width;
+		burst = echan->cfg.src_maxburst;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		dev_addr = echan->cfg.dst_addr;
+		dev_width = echan->cfg.dst_addr_width;
+		burst = echan->cfg.dst_maxburst;
+	} else {
+		dev_err(dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+		dev_err(dev, "Undefined slave buswidth\n");
+		return NULL;
+	}
+
+	if (sg_len > MAX_NR_SG) {
+		dev_err(dev, "Exceeded max SG segments %d > %d\n",
+			sg_len, MAX_NR_SG);
+		return NULL;
+	}
+
+	edesc = kzalloc(sizeof(*edesc) + sg_len *
+		sizeof(edesc->pset[0]), GFP_ATOMIC);
+	if (!edesc) {
+		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		return NULL;
+	}
+
+	edesc->pset_nr = sg_len;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		/* Allocate a PaRAM slot, if needed */
+		if (echan->slot[i] < 0) {
+			echan->slot[i] =
+				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
+						EDMA_SLOT_ANY);
+			if (echan->slot[i] < 0) {
+				dev_err(dev, "Failed to allocate slot\n");
+				return NULL;
+			}
+		}
+
+		acnt = dev_width;
+
+		/*
+		 * If the maxburst is equal to the fifo width, use
+		 * A-synced transfers. This allows for large contiguous
+		 * buffer transfers using only one PaRAM set.
+		 */
+		if (burst == 1) {
+			edesc->absync = false;
+			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
+			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
+			if (bcnt)
+				ccnt++;
+			else
+				bcnt = SZ_64K - 1;
+			cidx = acnt;
+		/*
+		 * If maxburst is greater than the fifo address_width,
+		 * use AB-synced transfers where A count is the fifo
+		 * address_width and B count is the maxburst. In this
+		 * case, we are limited to transfers of C count frames
+		 * of (address_width * maxburst) where C count is limited
+		 * to SZ_64K-1. This places an upper bound on the length
+		 * of an SG segment that can be handled.
+		 */
+		} else {
+			edesc->absync = true;
+			bcnt = burst;
+			ccnt = sg_dma_len(sg) / (acnt * bcnt);
+			if (ccnt > (SZ_64K - 1)) {
+				dev_err(dev, "Exceeded max SG segment size\n");
+				return NULL;
+			}
+			cidx = acnt * bcnt;
+		}
+
+		if (direction == DMA_MEM_TO_DEV) {
+			src = sg_dma_address(sg);
+			dst = dev_addr;
+			src_bidx = acnt;
+			src_cidx = cidx;
+			dst_bidx = 0;
+			dst_cidx = 0;
+		} else {
+			src = dev_addr;
+			dst = sg_dma_address(sg);
+			src_bidx = 0;
+			src_cidx = 0;
+			dst_bidx = acnt;
+			dst_cidx = cidx;
+		}
+
+		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+		/* Configure A or AB synchronized transfers */
+		if (edesc->absync)
+			edesc->pset[i].opt |= SYNCDIM;
+		/* If this is the last set, enable completion interrupt flag */
+		if (i == sg_len - 1)
+			edesc->pset[i].opt |= TCINTEN;
+
+		edesc->pset[i].src = src;
+		edesc->pset[i].dst = dst;
+
+		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
+		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
+		edesc->pset[i].ccnt = ccnt;
+		edesc->pset[i].link_bcntrld = 0xffffffff;
+
+	}
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
+{
+	struct edma_chan *echan = data;
+	struct device *dev = echan->vchan.chan.device->dev;
+	struct edma_desc *edesc;
+	unsigned long flags;
+
+	/* Stop the channel */
+	edma_stop(echan->ch_num);
+
+	switch (ch_status) {
+	case DMA_COMPLETE:
+		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
+
+		spin_lock_irqsave(&echan->vchan.lock, flags);
+
+		edesc = echan->edesc;
+		if (edesc) {
+			edma_execute(echan);
+			vchan_cookie_complete(&edesc->vdesc);
+		}
+
+		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
+		break;
+	case DMA_CC_ERROR:
+		dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Alloc channel resources */
+static int edma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int ret;
+	int a_ch_num;
+	LIST_HEAD(descs);
+
+	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
+					chan, EVENTQ_DEFAULT);
+
+	if (a_ch_num < 0) {
+		ret = -ENODEV;
+		goto err_no_chan;
+	}
+
+	if (a_ch_num != echan->ch_num) {
+		dev_err(dev, "failed to allocate requested channel %u:%u\n",
+			EDMA_CTLR(echan->ch_num),
+			EDMA_CHAN_SLOT(echan->ch_num));
+		ret = -ENODEV;
+		goto err_wrong_chan;
+	}
+
+	echan->alloced = true;
+	echan->slot[0] = echan->ch_num;
+
+	dev_info(dev, "allocated channel for %u:%u\n",
+		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+
+	return 0;
+
+err_wrong_chan:
+	edma_free_channel(a_ch_num);
+err_no_chan:
+	return ret;
+}
+
+/* Free channel resources */
+static void edma_free_chan_resources(struct dma_chan *chan)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int i;
+
+	/* Terminate transfers */
+	edma_stop(echan->ch_num);
+
+	vchan_free_chan_resources(&echan->vchan);
+
+	/* Free EDMA PaRAM slots */
+	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
+		if (echan->slot[i] >= 0) {
+			edma_free_slot(echan->slot[i]);
+			echan->slot[i] = -1;
+		}
+	}
+
+	/* Free EDMA channel */
+	if (echan->alloced) {
+		edma_free_channel(echan->ch_num);
+		echan->alloced = false;
+	}
+
+	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+}
+
+/* Send pending descriptor to hardware */
+static void edma_issue_pending(struct dma_chan *chan)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&echan->vchan.lock, flags);
+	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
+		edma_execute(echan);
+	spin_unlock_irqrestore(&echan->vchan.lock, flags);
+}
+
+static size_t edma_desc_size(struct edma_desc *edesc)
+{
+	int i;
+	size_t size;
+
+	if (edesc->absync)
+		for (size = i = 0; i < edesc->pset_nr; i++)
+			size += (edesc->pset[i].a_b_cnt & 0xffff) *
+				(edesc->pset[i].a_b_cnt >> 16) *
+				edesc->pset[i].ccnt;
+	else
+		size = (edesc->pset[0].a_b_cnt & 0xffff) *
+			(edesc->pset[0].a_b_cnt >> 16) +
+			(edesc->pset[0].a_b_cnt & 0xffff) *
+			(SZ_64K - 1) * edesc->pset[0].ccnt;
+
+	return size;
+}
+
+/* Check request completion status */
+static enum dma_status edma_tx_status(struct dma_chan *chan,
+				      dma_cookie_t cookie,
+				      struct dma_tx_state *txstate)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&echan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&echan->vchan, cookie);
+	if (vdesc) {
+		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
+	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
+		struct edma_desc *edesc = echan->edesc;
+		txstate->residue = edma_desc_size(edesc);
+	} else {
+		txstate->residue = 0;
+	}
+	spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
+	return ret;
+}
+
+static void __init edma_chan_init(struct edma_cc *ecc,
+				  struct dma_device *dma,
+				  struct edma_chan *echans)
+{
+	int i, j;
+
+	for (i = 0; i < EDMA_CHANS; i++) {
+		struct edma_chan *echan = &echans[i];
+		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
+		echan->ecc = ecc;
+		echan->vchan.desc_free = edma_desc_free;
+
+		vchan_init(&echan->vchan, dma);
+
+		INIT_LIST_HEAD(&echan->node);
+		for (j = 0; j < EDMA_MAX_SLOTS; j++)
+			echan->slot[j] = -1;
+	}
+}
+
+static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
+			  struct device *dev)
+{
+	dma->device_prep_slave_sg = edma_prep_slave_sg;
+	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
+	dma->device_free_chan_resources = edma_free_chan_resources;
+	dma->device_issue_pending = edma_issue_pending;
+	dma->device_tx_status = edma_tx_status;
+	dma->device_control = edma_control;
+	dma->dev = dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static int edma_probe(struct platform_device *pdev)
+{
+	struct edma_cc *ecc;
+	int ret;
+
+	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
+	if (!ecc) {
+		dev_err(&pdev->dev, "Can't allocate controller\n");
+		return -ENOMEM;
+	}
+
+	ecc->ctlr = pdev->id;
+	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
+	if (ecc->dummy_slot < 0) {
+		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
+		return -EIO;
+	}
+
+	dma_cap_zero(ecc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+
+	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
+
+	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
+
+	ret = dma_async_device_register(&ecc->dma_slave);
+	if (ret)
+		goto err_reg1;
+
+	platform_set_drvdata(pdev, ecc);
+
+	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");
+
+	return 0;
+
+err_reg1:
+	edma_free_slot(ecc->dummy_slot);
+	return ret;
+}
+
+static int edma_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct edma_cc *ecc = dev_get_drvdata(dev);
+
+	dma_async_device_unregister(&ecc->dma_slave);
+	edma_free_slot(ecc->dummy_slot);
+
+	return 0;
+}
+
+static struct platform_driver edma_driver = {
+	.probe		= edma_probe,
+	.remove		= edma_remove,
+	.driver = {
+		.name = "edma-dma-engine",
+		.owner = THIS_MODULE,
+	},
+};
+
+bool edma_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &edma_driver.driver) {
+		struct edma_chan *echan = to_edma_chan(chan);
+		unsigned ch_req = *(unsigned *)param;
+		return ch_req == echan->ch_num;
+	}
+	return false;
+}
+EXPORT_SYMBOL(edma_filter_fn);
+
+static struct platform_device *pdev0, *pdev1;
+
+static const struct platform_device_info edma_dev_info0 = {
+	.name = "edma-dma-engine",
+	.id = 0,
+};
+
+static const struct platform_device_info edma_dev_info1 = {
+	.name = "edma-dma-engine",
+	.id = 1,
+};
+
+static int edma_init(void)
+{
+	int ret = platform_driver_register(&edma_driver);
+
+	if (ret == 0) {
+		pdev0 = platform_device_register_full(&edma_dev_info0);
+		if (IS_ERR(pdev0)) {
+			platform_driver_unregister(&edma_driver);
+			ret = PTR_ERR(pdev0);
+			goto out;
+		}
+		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	if (EDMA_CTLRS == 2) {
+		pdev1 = platform_device_register_full(&edma_dev_info1);
+		if (IS_ERR(pdev1)) {
+			platform_driver_unregister(&edma_driver);
+			platform_device_unregister(pdev0);
+			ret = PTR_ERR(pdev1);
+		}
+		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+out:
+	return ret;
+}
+subsys_initcall(edma_init);
+
+static void __exit edma_exit(void)
+{
+	platform_device_unregister(pdev0);
+	if (pdev1)
+		platform_device_unregister(pdev1);
+	platform_driver_unregister(&edma_driver);
+}
+module_exit(edma_exit);
+
+MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
+MODULE_DESCRIPTION("TI EDMA DMA engine driver");
+MODULE_LICENSE("GPL v2");
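Annotation (not part of the patch, ignored by patch tools): for burst == 1, edma_prep_slave_sg() above programs an A-synced PaRAM set and splits the element count of a segment into bcnt elements per frame (at most 64K-1) and ccnt frames. A stand-alone C model of exactly that arithmetic, with an arbitrary example length:

#include <stdio.h>

#define SZ_64K 0x10000

int main(void)
{
	unsigned len = 200000;	/* sg_dma_len(sg), bytes */
	unsigned acnt = 4;	/* element size = slave bus width */
	unsigned bcnt, ccnt;

	/* same computation as the burst == 1 branch of the driver */
	ccnt = len / acnt / (SZ_64K - 1);
	bcnt = len / acnt - ccnt * (SZ_64K - 1);
	if (bcnt)
		ccnt++;		/* partial trailing frame */
	else
		bcnt = SZ_64K - 1;

	printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
	return 0;
}

For burst > 1 the driver instead uses AB-synced sets with bcnt = burst, so ccnt alone is bounded by 64K-1 frames, which is what caps the supported SG segment length.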
diff -urN linux-3.0.101/drivers/dma/ep93xx_dma.c linux-3.0.101.xm510/drivers/dma/ep93xx_dma.c
--- linux-3.0.101/drivers/dma/ep93xx_dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/ep93xx_dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,1453 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/dma-ep93xx.h>
+
+#include "dmaengine.h"
+
+/* M2P registers */
+#define M2P_CONTROL			0x0000
+#define M2P_CONTROL_STALLINT		BIT(0)
+#define M2P_CONTROL_NFBINT		BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
+#define M2P_CONTROL_ENABLE		BIT(4)
+#define M2P_CONTROL_ICE			BIT(6)
+
+#define M2P_INTERRUPT			0x0004
+#define M2P_INTERRUPT_STALL		BIT(0)
+#define M2P_INTERRUPT_NFB		BIT(1)
+#define M2P_INTERRUPT_ERROR		BIT(3)
+
+#define M2P_PPALLOC			0x0008
+#define M2P_STATUS			0x000c
+
+#define M2P_MAXCNT0			0x0020
+#define M2P_BASE0			0x0024
+#define M2P_MAXCNT1			0x0030
+#define M2P_BASE1			0x0034
+
+#define M2P_STATE_IDLE			0
+#define M2P_STATE_STALL			1
+#define M2P_STATE_ON			2
+#define M2P_STATE_NEXT			3
+
+/* M2M registers */
+#define M2M_CONTROL			0x0000
+#define M2M_CONTROL_DONEINT		BIT(2)
+#define M2M_CONTROL_ENABLE		BIT(3)
+#define M2M_CONTROL_START		BIT(4)
+#define M2M_CONTROL_DAH			BIT(11)
+#define M2M_CONTROL_SAH			BIT(12)
+#define M2M_CONTROL_PW_SHIFT		9
+#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT		13
+#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT		BIT(21)
+#define M2M_CONTROL_RSS_SHIFT		22
+#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK		BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT		25
+
+#define M2M_INTERRUPT			0x0004
+#define M2M_INTERRUPT_MASK		6
+
+#define M2M_STATUS			0x000c
+#define M2M_STATUS_CTL_SHIFT		1
+#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT		4
+#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE			BIT(6)
+
+#define M2M_BCR0			0x0010
+#define M2M_BCR1			0x0014
+#define M2M_SAR_BASE0			0x0018
+#define M2M_SAR_BASE1			0x001c
+#define M2M_DAR_BASE0			0x002c
+#define M2M_DAR_BASE1			0x0030
+
+#define DMA_MAX_CHAN_BYTES		0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS	32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+	u32				src_addr;
+	u32				dst_addr;
+	size_t				size;
+	bool				complete;
+	struct dma_async_tx_descriptor	txd;
+	struct list_head		tx_list;
+	struct list_head		node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ *                is set via %DMA_SLAVE_CONFIG before slave operation is
+ *                prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As EP93xx DMA controller doesn't support real chained DMA descriptors we
+ * will have slightly different scheme here: @active points to a head of
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds pointer to &struct ep93xx_dma_data which contains
+ * necessary channel configuration information. For memcpy channels this must
+ * be %NULL.
+ */
+struct ep93xx_dma_chan {
+	struct dma_chan			chan;
+	const struct ep93xx_dma_engine	*edma;
+	void __iomem			*regs;
+	int				irq;
+	struct clk			*clk;
+	struct tasklet_struct		tasklet;
+	/* protects the fields following */
+	spinlock_t			lock;
+	unsigned long			flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC		0
+
+	int				buffer;
+	struct list_head		active;
+	struct list_head		queue;
+	struct list_head		free_list;
+	u32				runtime_addr;
+	u32				runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with channel
+ * lock held and interrupts disabled so they cannot sleep.
+ */
+struct ep93xx_dma_engine {
+	struct dma_device	dma_dev;
+	bool			m2m;
+	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
+	void			(*hw_submit)(struct ep93xx_dma_chan *);
+	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN	0
+#define INTERRUPT_DONE		1
+#define INTERRUPT_NEXT_BUFFER	2
+
+	size_t			num_channels;
+	struct ep93xx_dma_chan	channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+	return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+				  struct ep93xx_dma_desc *desc)
+{
+	BUG_ON(!list_empty(&edmac->active));
+
+	list_add_tail(&desc->node, &edmac->active);
+
+	/* Flatten the @desc->tx_list chain into @edmac->active list */
+	while (!list_empty(&desc->tx_list)) {
+		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+			struct ep93xx_dma_desc, node);
+
+		/*
+		 * We copy the callback parameters from the first descriptor
+		 * to all the chained descriptors. This way we can call the
+		 * callback without having to find out the first descriptor in
+		 * the chain. Useful for cyclic transfers.
+		 */
+		d->txd.callback = desc->txd.callback;
+		d->txd.callback_param = desc->txd.callback_param;
+
+		list_move_tail(&d->node, &edmac->active);
+	}
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+	if (list_empty(&edmac->active))
+		return NULL;
+
+	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Function advances active descriptor to the next in the @edmac->active and
+ * returns %true if we still have descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc;
+
+	list_rotate_left(&edmac->active);
+
+	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+		return true;
+
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc)
+		return false;
+
+	/*
+	 * If txd.cookie is set it means that we are back in the first
+	 * descriptor in the chain and hence done with it.
+	 */
+	return !desc->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+	writel(control, edmac->regs + M2P_CONTROL);
+	/*
+	 * EP93xx User's Guide states that we must perform a dummy read after
+	 * write to the control register.
+	 */
+	readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_data *data = edmac->chan.private;
+	u32 control;
+
+	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+		| M2P_CONTROL_ENABLE;
+	m2p_set_control(edmac, control);
+
+	return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+	u32 control;
+
+	control = readl(edmac->regs + M2P_CONTROL);
+	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+	m2p_set_control(edmac, control);
+
+	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+		cpu_relax();
+
+	m2p_set_control(edmac, 0);
+
+	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+		cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc;
+	u32 bus_addr;
+
+	desc = ep93xx_dma_get_active(edmac);
+	if (!desc) {
+		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+		return;
+	}
+
+	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
+		bus_addr = desc->src_addr;
+	else
+		bus_addr = desc->dst_addr;
+
+	if (edmac->buffer == 0) {
+		writel(desc->size, edmac->regs + M2P_MAXCNT0);
+		writel(bus_addr, edmac->regs + M2P_BASE0);
+	} else {
+		writel(desc->size, edmac->regs + M2P_MAXCNT1);
+		writel(bus_addr, edmac->regs + M2P_BASE1);
+	}
+
+	edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+	u32 control = readl(edmac->regs + M2P_CONTROL);
+
+	m2p_fill_desc(edmac);
+	control |= M2P_CONTROL_STALLINT;
+
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2p_fill_desc(edmac);
+		control |= M2P_CONTROL_NFBINT;
+	}
+
+	m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+	u32 control;
+
+	if (irq_status & M2P_INTERRUPT_ERROR) {
+		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+		/* Clear the error interrupt */
+		writel(1, edmac->regs + M2P_INTERRUPT);
+
+		/*
+		 * It seems that there is no easy way of reporting errors back
+		 * to client so we just report the error here and continue as
+		 * usual.
+		 *
+		 * Revisit this when there is a mechanism to report back the
+		 * errors.
+		 */
+		dev_err(chan2dev(edmac),
+			"DMA transfer failed! Details:\n"
+			"\tcookie	: %d\n"
+			"\tsrc_addr	: 0x%08x\n"
+			"\tdst_addr	: 0x%08x\n"
+			"\tsize	: %zu\n",
+			desc->txd.cookie, desc->src_addr, desc->dst_addr,
+			desc->size);
+	}
+
+	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+	case M2P_INTERRUPT_STALL:
+		/* Disable interrupts */
+		control = readl(edmac->regs + M2P_CONTROL);
+		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+		m2p_set_control(edmac, control);
+
+		return INTERRUPT_DONE;
+
+	case M2P_INTERRUPT_NFB:
+		if (ep93xx_dma_advance_active(edmac))
+			m2p_fill_desc(edmac);
+
+		return INTERRUPT_NEXT_BUFFER;
+	}
+
+	return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+	const struct ep93xx_dma_data *data = edmac->chan.private;
+	u32 control = 0;
+
+	if (!data) {
+		/* This is memcpy channel, nothing to configure */
+		writel(control, edmac->regs + M2M_CONTROL);
+		return 0;
+	}
+
+	switch (data->port) {
+	case EP93XX_DMA_SSP:
+		/*
+		 * This was found via experimenting - anything less than 5
+		 * causes the channel to perform only a partial transfer which
+		 * leads to problems since we don't get DONE interrupt then.
+		 */
+		control = (5 << M2M_CONTROL_PWSC_SHIFT);
+		control |= M2M_CONTROL_NO_HDSK;
+
+		if (data->direction == DMA_MEM_TO_DEV) {
+			control |= M2M_CONTROL_DAH;
+			control |= M2M_CONTROL_TM_TX;
+			control |= M2M_CONTROL_RSS_SSPTX;
+		} else {
+			control |= M2M_CONTROL_SAH;
+			control |= M2M_CONTROL_TM_RX;
+			control |= M2M_CONTROL_RSS_SSPRX;
+		}
+		break;
+
+	case EP93XX_DMA_IDE:
+		/*
+		 * This IDE part is totally untested. Values below are taken
+ * from the EP93xx Users's Guide and might not be correct.
|
||
+ */
|
||
+ if (data->direction == DMA_MEM_TO_DEV) {
|
||
+ /* Worst case from the UG */
|
||
+ control = (3 << M2M_CONTROL_PWSC_SHIFT);
|
||
+ control |= M2M_CONTROL_DAH;
|
||
+ control |= M2M_CONTROL_TM_TX;
|
||
+ } else {
|
||
+ control = (2 << M2M_CONTROL_PWSC_SHIFT);
|
||
+ control |= M2M_CONTROL_SAH;
|
||
+ control |= M2M_CONTROL_TM_RX;
|
||
+ }
|
||
+
|
||
+ control |= M2M_CONTROL_NO_HDSK;
|
||
+ control |= M2M_CONTROL_RSS_IDE;
|
||
+ control |= M2M_CONTROL_PW_16;
|
||
+ break;
|
||
+
|
||
+ default:
|
||
+ return -EINVAL;
|
||
+ }
|
||
+
|
||
+ writel(control, edmac->regs + M2M_CONTROL);
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ /* Just disable the channel */
|
||
+ writel(0, edmac->regs + M2M_CONTROL);
|
||
+}
|
||
+
|
||
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ struct ep93xx_dma_desc *desc;
|
||
+
|
||
+ desc = ep93xx_dma_get_active(edmac);
|
||
+ if (!desc) {
|
||
+ dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
|
||
+ return;
|
||
+ }
|
||
+
|
||
+ if (edmac->buffer == 0) {
|
||
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
|
||
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
|
||
+ writel(desc->size, edmac->regs + M2M_BCR0);
|
||
+ } else {
|
||
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
|
||
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
|
||
+ writel(desc->size, edmac->regs + M2M_BCR1);
|
||
+ }
|
||
+
|
||
+ edmac->buffer ^= 1;
|
||
+}
|
||
+
|
||
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ struct ep93xx_dma_data *data = edmac->chan.private;
|
||
+ u32 control = readl(edmac->regs + M2M_CONTROL);
|
||
+
|
||
+ /*
|
||
+ * Since we allow clients to configure PW (peripheral width) we always
|
||
+ * clear PW bits here and then set them according what is given in
|
||
+ * the runtime configuration.
|
||
+ */
|
||
+ control &= ~M2M_CONTROL_PW_MASK;
|
||
+ control |= edmac->runtime_ctrl;
|
||
+
|
||
+ m2m_fill_desc(edmac);
|
||
+ control |= M2M_CONTROL_DONEINT;
|
||
+
|
||
+ if (ep93xx_dma_advance_active(edmac)) {
|
||
+ m2m_fill_desc(edmac);
|
||
+ control |= M2M_CONTROL_NFBINT;
|
||
+ }
|
||
+
|
||
+ /*
|
||
+ * Now we can finally enable the channel. For M2M channel this must be
|
||
+ * done _after_ the BCRx registers are programmed.
|
||
+ */
|
||
+ control |= M2M_CONTROL_ENABLE;
|
||
+ writel(control, edmac->regs + M2M_CONTROL);
|
||
+
|
||
+ if (!data) {
|
||
+ /*
|
||
+ * For memcpy channels the software trigger must be asserted
|
||
+ * in order to start the memcpy operation.
|
||
+ */
|
||
+ control |= M2M_CONTROL_START;
|
||
+ writel(control, edmac->regs + M2M_CONTROL);
|
||
+ }
|
||
+}
|
||
+
|
||
+/*
|
||
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
|
||
+ * M2M DMA controller transactions complete normally. This is not always the
|
||
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
|
||
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
|
||
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
|
||
+ * In effect, disabling the channel when only DONE bit is set could stop
|
||
+ * currently running DMA transfer. To avoid this, we use Buffer FSM and
|
||
+ * Control FSM to check current state of DMA channel.
|
||
+ */
|
||
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ u32 status = readl(edmac->regs + M2M_STATUS);
|
||
+ u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
|
||
+ u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
|
||
+ bool done = status & M2M_STATUS_DONE;
|
||
+ bool last_done;
|
||
+ u32 control;
|
||
+ struct ep93xx_dma_desc *desc;
|
||
+
|
||
+ /* Accept only DONE and NFB interrupts */
|
||
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
|
||
+ return INTERRUPT_UNKNOWN;
|
||
+
|
||
+ if (done) {
|
||
+ /* Clear the DONE bit */
|
||
+ writel(0, edmac->regs + M2M_INTERRUPT);
|
||
+ }
|
||
+
|
||
+ /*
|
||
+ * Check whether we are done with descriptors or not. This, together
|
||
+ * with DMA channel state, determines action to take in interrupt.
|
||
+ */
|
||
+ desc = ep93xx_dma_get_active(edmac);
|
||
+ last_done = !desc || desc->txd.cookie;
|
||
+
|
||
+ /*
|
||
+ * Use M2M DMA Buffer FSM and Control FSM to check current state of
|
||
+ * DMA channel. Using DONE and NFB bits from channel status register
|
||
+ * or bits from channel interrupt register is not reliable.
|
||
+ */
|
||
+ if (!last_done &&
|
||
+ (buf_fsm == M2M_STATUS_BUF_NO ||
|
||
+ buf_fsm == M2M_STATUS_BUF_ON)) {
|
||
+ /*
|
||
+ * Two buffers are ready for update when Buffer FSM is in
|
||
+ * DMA_NO_BUF state. Only one buffer can be prepared without
|
||
+ * disabling the channel or polling the DONE bit.
|
||
+ * To simplify things, always prepare only one buffer.
|
||
+ */
|
||
+ if (ep93xx_dma_advance_active(edmac)) {
|
||
+ m2m_fill_desc(edmac);
|
||
+ if (done && !edmac->chan.private) {
|
||
+ /* Software trigger for memcpy channel */
|
||
+ control = readl(edmac->regs + M2M_CONTROL);
|
||
+ control |= M2M_CONTROL_START;
|
||
+ writel(control, edmac->regs + M2M_CONTROL);
|
||
+ }
|
||
+ return INTERRUPT_NEXT_BUFFER;
|
||
+ } else {
|
||
+ last_done = true;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ /*
|
||
+ * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
|
||
+ * and Control FSM is in DMA_STALL state.
|
||
+ */
|
||
+ if (last_done &&
|
||
+ buf_fsm == M2M_STATUS_BUF_NO &&
|
||
+ ctl_fsm == M2M_STATUS_CTL_STALL) {
|
||
+ /* Disable interrupts and the channel */
|
||
+ control = readl(edmac->regs + M2M_CONTROL);
|
||
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
|
||
+ | M2M_CONTROL_ENABLE);
|
||
+ writel(control, edmac->regs + M2M_CONTROL);
|
||
+ return INTERRUPT_DONE;
|
||
+ }
|
||
+
|
||
+ /*
|
||
+ * Nothing to do this time.
|
||
+ */
|
||
+ return INTERRUPT_NEXT_BUFFER;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * DMA engine API implementation
|
||
+ */
|
||
+
|
||
+static struct ep93xx_dma_desc *
|
||
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ struct ep93xx_dma_desc *desc, *_desc;
|
||
+ struct ep93xx_dma_desc *ret = NULL;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&edmac->lock, flags);
|
||
+ list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
|
||
+ if (async_tx_test_ack(&desc->txd)) {
|
||
+ list_del_init(&desc->node);
|
||
+
|
||
+ /* Re-initialize the descriptor */
|
||
+ desc->src_addr = 0;
|
||
+ desc->dst_addr = 0;
|
||
+ desc->size = 0;
|
||
+ desc->complete = false;
|
||
+ desc->txd.cookie = 0;
|
||
+ desc->txd.callback = NULL;
|
||
+ desc->txd.callback_param = NULL;
|
||
+
|
||
+ ret = desc;
|
||
+ break;
|
||
+ }
|
||
+ }
|
||
+ spin_unlock_irqrestore(&edmac->lock, flags);
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
|
||
+ struct ep93xx_dma_desc *desc)
|
||
+{
|
||
+ if (desc) {
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&edmac->lock, flags);
|
||
+ list_splice_init(&desc->tx_list, &edmac->free_list);
|
||
+ list_add(&desc->node, &edmac->free_list);
|
||
+ spin_unlock_irqrestore(&edmac->lock, flags);
|
||
+ }
|
||
+}
|
||
+
|
||
+/**
|
||
+ * ep93xx_dma_advance_work - start processing the next pending transaction
|
||
+ * @edmac: channel
|
||
+ *
|
||
+ * If we have pending transactions queued and we are currently idling, this
|
||
+ * function takes the next queued transaction from the @edmac->queue and
|
||
+ * pushes it to the hardware for execution.
|
||
+ */
|
||
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
|
||
+{
|
||
+ struct ep93xx_dma_desc *new;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&edmac->lock, flags);
|
||
+ if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
|
||
+ spin_unlock_irqrestore(&edmac->lock, flags);
|
||
+ return;
|
||
+ }
|
||
+
|
||
+ /* Take the next descriptor from the pending queue */
|
||
+ new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
|
||
+ list_del_init(&new->node);
|
||
+
|
||
+ ep93xx_dma_set_active(edmac, new);
|
||
+
|
||
+ /* Push it to the hardware */
|
||
+ edmac->edma->hw_submit(edmac);
|
||
+ spin_unlock_irqrestore(&edmac->lock, flags);
|
||
+}
|
||
+
|
||
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
|
||
+{
|
||
+ struct device *dev = desc->txd.chan->device->dev;
|
||
+
|
||
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
|
||
+ if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
|
||
+ dma_unmap_single(dev, desc->src_addr, desc->size,
|
||
+ DMA_TO_DEVICE);
|
||
+ else
|
||
+ dma_unmap_page(dev, desc->src_addr, desc->size,
|
||
+ DMA_TO_DEVICE);
|
||
+ }
|
||
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
|
||
+ if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
|
||
+ dma_unmap_single(dev, desc->dst_addr, desc->size,
|
||
+ DMA_FROM_DEVICE);
|
||
+ else
|
||
+ dma_unmap_page(dev, desc->dst_addr, desc->size,
|
||
+ DMA_FROM_DEVICE);
|
||
+ }
|
||
+}
|
||
+
|
||
+static void ep93xx_dma_tasklet(unsigned long data)
|
||
+{
|
||
+ struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
|
||
+ struct ep93xx_dma_desc *desc, *d;
|
||
+ dma_async_tx_callback callback = NULL;
|
||
+ void *callback_param = NULL;
|
||
+ LIST_HEAD(list);
|
||
+
|
||
+ spin_lock_irq(&edmac->lock);
|
||
+ /*
|
||
+ * If dma_terminate_all() was called before we get to run, the active
|
||
+ * list has become empty. If that happens we aren't supposed to do
|
||
+ * anything more than call ep93xx_dma_advance_work().
|
||
+ */
|
||
+ desc = ep93xx_dma_get_active(edmac);
|
||
+ if (desc) {
|
||
+ if (desc->complete) {
|
||
+ /* mark descriptor complete for non cyclic case only */
|
||
+ if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
|
||
+ dma_cookie_complete(&desc->txd);
|
||
+ list_splice_init(&edmac->active, &list);
|
||
+ }
|
||
+ callback = desc->txd.callback;
|
||
+ callback_param = desc->txd.callback_param;
|
||
+ }
|
||
+ spin_unlock_irq(&edmac->lock);
|
||
+
|
||
+ /* Pick up the next descriptor from the queue */
|
||
+ ep93xx_dma_advance_work(edmac);
|
||
+
|
||
+ /* Now we can release all the chained descriptors */
|
||
+ list_for_each_entry_safe(desc, d, &list, node) {
|
||
+ /*
|
||
+ * For the memcpy channels the API requires us to unmap the
|
||
+ * buffers unless requested otherwise.
|
||
+ */
|
||
+ if (!edmac->chan.private)
|
||
+ ep93xx_dma_unmap_buffers(desc);
|
||
+
|
||
+ ep93xx_dma_desc_put(edmac, desc);
|
||
+ }
|
||
+
|
||
+ if (callback)
|
||
+ callback(callback_param);
|
||
+}
|
||
+
|
||
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
|
||
+{
|
||
+ struct ep93xx_dma_chan *edmac = dev_id;
|
||
+ struct ep93xx_dma_desc *desc;
|
||
+ irqreturn_t ret = IRQ_HANDLED;
|
||
+
|
||
+ spin_lock(&edmac->lock);
|
||
+
|
||
+ desc = ep93xx_dma_get_active(edmac);
|
||
+ if (!desc) {
|
||
+ dev_warn(chan2dev(edmac),
|
||
+ "got interrupt while active list is empty\n");
|
||
+ spin_unlock(&edmac->lock);
|
||
+ return IRQ_NONE;
|
||
+ }
|
||
+
|
||
+ switch (edmac->edma->hw_interrupt(edmac)) {
|
||
+ case INTERRUPT_DONE:
|
||
+ desc->complete = true;
|
||
+ tasklet_schedule(&edmac->tasklet);
|
||
+ break;
|
||
+
|
||
+ case INTERRUPT_NEXT_BUFFER:
|
||
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
|
||
+ tasklet_schedule(&edmac->tasklet);
|
||
+ break;
|
||
+
|
||
+ default:
|
||
+ dev_warn(chan2dev(edmac), "unknown interrupt!\n");
|
||
+ ret = IRQ_NONE;
|
||
+ break;
|
||
+ }
|
||
+
|
||
+ spin_unlock(&edmac->lock);
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+/**
|
||
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
|
||
+ * @tx: descriptor to be executed
|
||
+ *
|
||
+ * Function will execute given descriptor on the hardware or if the hardware
|
||
+ * is busy, queue the descriptor to be executed later on. Returns cookie which
|
||
+ * can be used to poll the status of the descriptor.
|
||
+ */
|
||
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
|
||
+{
|
||
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
|
||
+ struct ep93xx_dma_desc *desc;
|
||
+ dma_cookie_t cookie;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&edmac->lock, flags);
|
||
+ cookie = dma_cookie_assign(tx);
|
||
+
|
||
+ desc = container_of(tx, struct ep93xx_dma_desc, txd);
|
||
+
|
||
+ /*
|
||
+ * If nothing is currently prosessed, we push this descriptor
|
||
+ * directly to the hardware. Otherwise we put the descriptor
|
||
+ * to the pending queue.
|
||
+ */
|
||
+ if (list_empty(&edmac->active)) {
|
||
+ ep93xx_dma_set_active(edmac, desc);
|
||
+ edmac->edma->hw_submit(edmac);
|
||
+ } else {
|
||
+ list_add_tail(&desc->node, &edmac->queue);
|
||
+ }
|
||
+
|
||
+ spin_unlock_irqrestore(&edmac->lock, flags);
|
||
+ return cookie;
|
||
+}
|
||
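For context, a sketch of how a dmaengine client would reach ep93xx_dma_tx_submit() on a memcpy-capable (M2M) channel. This is an assumed usage flow built from the generic dmaengine API of this kernel, not code from the patch; names such as example_memcpy() are hypothetical and error handling is kept minimal:

#include <linux/dmaengine.h>
#include <linux/completion.h>

static void example_done(void *arg)
{
	complete(arg);			/* signal the waiting submitter */
}

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	tx->callback = example_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);		/* ends up in ep93xx_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* kick the queue if it was idle */

	wait_for_completion(&done);
	dma_release_channel(chan);
	return 0;
}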
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Function allocates necessary resources for the given DMA channel and
+ * returns number of allocated descriptors for the channel. Negative errno
+ * is returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_data *data = chan->private;
+	const char *name = dma_chan_name(chan);
+	int ret, i;
+
+	/* Sanity check the channel parameters */
+	if (!edmac->edma->m2m) {
+		if (!data)
+			return -EINVAL;
+		if (data->port < EP93XX_DMA_I2S1 ||
+		    data->port > EP93XX_DMA_IRDA)
+			return -EINVAL;
+		if (data->direction != ep93xx_dma_chan_direction(chan))
+			return -EINVAL;
+	} else {
+		if (data) {
+			switch (data->port) {
+			case EP93XX_DMA_SSP:
+			case EP93XX_DMA_IDE:
+				if (!is_slave_direction(data->direction))
+					return -EINVAL;
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (data && data->name)
+		name = data->name;
+
+	ret = clk_enable(edmac->clk);
+	if (ret)
+		return ret;
+
+	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+	if (ret)
+		goto fail_clk_disable;
+
+	spin_lock_irq(&edmac->lock);
+	dma_cookie_init(&edmac->chan);
+	ret = edmac->edma->hw_setup(edmac);
+	spin_unlock_irq(&edmac->lock);
+
+	if (ret)
+		goto fail_free_irq;
+
+	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+		struct ep93xx_dma_desc *desc;
+
+		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "not enough descriptors\n");
+			break;
+		}
+
+		INIT_LIST_HEAD(&desc->tx_list);
+
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		desc->txd.flags = DMA_CTRL_ACK;
+		desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+		ep93xx_dma_desc_put(edmac, desc);
+	}
+
+	return i;
+
+fail_free_irq:
+	free_irq(edmac->irq, edmac);
+fail_clk_disable:
+	clk_disable(edmac->clk);
+
+	return ret;
+}
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *d;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	BUG_ON(!list_empty(&edmac->active));
+	BUG_ON(!list_empty(&edmac->queue));
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	edmac->edma->hw_shutdown(edmac);
+	edmac->runtime_addr = 0;
+	edmac->runtime_ctrl = 0;
+	edmac->buffer = 0;
+	list_splice_init(&edmac->free_list, &list);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	list_for_each_entry_safe(desc, d, &list, node)
+		kfree(desc);
+
+	clk_disable(edmac->clk);
+	free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			   dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	size_t bytes, offset;
+
+	first = NULL;
+	for (offset = 0; offset < len; offset += bytes) {
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+		desc->src_addr = src + offset;
+		desc->dst_addr = dest + offset;
+		desc->size = bytes;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+	first->txd.flags = flags;
+
+	return &first->txd;
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ * @context: operation context (ignored)
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			 unsigned int sg_len, enum dma_transfer_direction dir,
+			 unsigned long flags, void *context)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	struct scatterlist *sg;
+	int i;
+
+	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+		dev_warn(chan2dev(edmac),
+			 "channel was configured with different direction\n");
+		return NULL;
+	}
+
+	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+		dev_warn(chan2dev(edmac),
+			 "channel is already used for cyclic transfers\n");
+		return NULL;
+	}
+
+	first = NULL;
+	for_each_sg(sgl, sg, sg_len, i) {
+		size_t sg_len = sg_dma_len(sg);
+
+		if (sg_len > DMA_MAX_CHAN_BYTES) {
+			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+				 sg_len);
+			goto fail;
+		}
+
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			desc->src_addr = sg_dma_address(sg);
+			desc->dst_addr = edmac->runtime_addr;
+		} else {
+			desc->src_addr = edmac->runtime_addr;
+			desc->dst_addr = sg_dma_address(sg);
+		}
+		desc->size = sg_len;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+	first->txd.flags = flags;
+
+	return &first->txd;
+
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period
+ * @dir: direction of the operation
+ * @flags: tx descriptor status flags
+ * @context: operation context (ignored)
+ *
+ * Prepares a descriptor for cyclic DMA operation. This means that once the
+ * descriptor is submitted, we will be submitting @period_len sized buffers
+ * and calling the callback once each period has elapsed. The transfer
+ * terminates only when the client calls dmaengine_terminate_all() for this
+ * channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+			   size_t buf_len, size_t period_len,
+			   enum dma_transfer_direction dir, unsigned long flags,
+			   void *context)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct ep93xx_dma_desc *desc, *first;
+	size_t offset = 0;
+
+	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+		dev_warn(chan2dev(edmac),
+			 "channel was configured with different direction\n");
+		return NULL;
+	}
+
+	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+		dev_warn(chan2dev(edmac),
+			 "channel is already used for cyclic transfers\n");
+		return NULL;
+	}
+
+	if (period_len > DMA_MAX_CHAN_BYTES) {
+		dev_warn(chan2dev(edmac), "too big period length %zu\n",
+			 period_len);
+		return NULL;
+	}
+
+	/* Split the buffer into period size chunks */
+	first = NULL;
+	for (offset = 0; offset < buf_len; offset += period_len) {
+		desc = ep93xx_dma_desc_get(edmac);
+		if (!desc) {
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
+			goto fail;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			desc->src_addr = dma_addr + offset;
+			desc->dst_addr = edmac->runtime_addr;
+		} else {
+			desc->src_addr = edmac->runtime_addr;
+			desc->dst_addr = dma_addr + offset;
+		}
+
+		desc->size = period_len;
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->tx_list);
+	}
+
+	first->txd.cookie = -EBUSY;
+
+	return &first->txd;
+
+fail:
+	ep93xx_dma_desc_put(edmac, first);
+	return NULL;
+}
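Similarly, a hedged sketch of a client driving the cyclic path, e.g. for an audio ring buffer. It assumes the device_prep_dma_cyclic() hook in this tree takes the flags and context arguments that the driver function above expects; the period and buffer sizes are arbitrary examples:

/*
 * The callback fires after every period_len bytes until the client
 * issues DMA_TERMINATE_ALL on the channel.
 */
static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
			      void (*cb)(void *), void *cb_arg)
{
	size_t period_len = 4096;		/* one callback per period */
	size_t buf_len = 4 * period_len;	/* the whole ring */
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_MEM_TO_DEV,
						   DMA_PREP_INTERRUPT, NULL);
	if (!txd)
		return -ENOMEM;
	txd->callback = cb;
	txd->callback_param = cb_arg;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}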
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back to the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+	struct ep93xx_dma_desc *desc, *_d;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	/* First we disable and flush the DMA channel */
+	edmac->edma->hw_shutdown(edmac);
+	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+	list_splice_init(&edmac->active, &list);
+	list_splice_init(&edmac->queue, &list);
+	/*
+	 * We then re-enable the channel. This way we can continue submitting
+	 * the descriptors by just calling ->hw_submit() again.
+	 */
+	edmac->edma->hw_setup(edmac);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	list_for_each_entry_safe(desc, _d, &list, node)
+		ep93xx_dma_desc_put(edmac, desc);
+
+	return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+				   struct dma_slave_config *config)
+{
+	enum dma_slave_buswidth width;
+	unsigned long flags;
+	u32 addr, ctrl;
+
+	if (!edmac->edma->m2m)
+		return -EINVAL;
+
+	switch (config->direction) {
+	case DMA_DEV_TO_MEM:
+		width = config->src_addr_width;
+		addr = config->src_addr;
+		break;
+
+	case DMA_MEM_TO_DEV:
+		width = config->dst_addr_width;
+		addr = config->dst_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl = M2M_CONTROL_PW_16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl = M2M_CONTROL_PW_32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	edmac->runtime_addr = addr;
+	edmac->runtime_ctrl = ctrl;
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	return 0;
+}
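The matching client-side step for the function above is a dma_slave_config filled for one direction; it reaches this code through the DMA_SLAVE_CONFIG case of ep93xx_dma_control() below. A minimal sketch, with a placeholder FIFO address:

static int example_configure(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* peripheral data register */
		/* selects M2M_CONTROL_PW_16 in ep93xx_dma_slave_config() */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
	};

	return dmaengine_slave_config(chan, &cfg);
}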
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Function returns %0 in case of success or negative
+ * error in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			      unsigned long arg)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		return ep93xx_dma_terminate_all(edmac);
+
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		return ep93xx_dma_slave_config(edmac, config);
+
+	default:
+		break;
+	}
+
+	return -ENOSYS;
+}
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *state)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+	enum dma_status ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&edmac->lock, flags);
+	ret = dma_cookie_status(chan, cookie, state);
+	spin_unlock_irqrestore(&edmac->lock, flags);
+
+	return ret;
+}
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct ep93xx_dma_engine *edma;
+	struct dma_device *dma_dev;
+	size_t edma_size;
+	int ret, i;
+
+	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+	if (!edma)
+		return -ENOMEM;
+
+	dma_dev = &edma->dma_dev;
+	edma->m2m = platform_get_device_id(pdev)->driver_data;
+	edma->num_channels = pdata->num_channels;
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+	for (i = 0; i < pdata->num_channels; i++) {
+		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+		struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+		edmac->chan.device = dma_dev;
+		edmac->regs = cdata->base;
+		edmac->irq = cdata->irq;
+		edmac->edma = edma;
+
+		edmac->clk = clk_get(NULL, cdata->name);
+		if (IS_ERR(edmac->clk)) {
+			dev_warn(&pdev->dev, "failed to get clock for %s\n",
+				 cdata->name);
+			continue;
+		}
+
+		spin_lock_init(&edmac->lock);
+		INIT_LIST_HEAD(&edmac->active);
+		INIT_LIST_HEAD(&edmac->queue);
+		INIT_LIST_HEAD(&edmac->free_list);
+		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+			     (unsigned long)edmac);
+
+		list_add_tail(&edmac->chan.device_node,
+			      &dma_dev->channels);
+	}
+
+	dma_cap_zero(dma_dev->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+	dma_dev->dev = &pdev->dev;
+	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+	dma_dev->device_control = ep93xx_dma_control;
+	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+	dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
+	if (edma->m2m) {
+		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+		edma->hw_setup = m2m_hw_setup;
+		edma->hw_shutdown = m2m_hw_shutdown;
+		edma->hw_submit = m2m_hw_submit;
+		edma->hw_interrupt = m2m_hw_interrupt;
+	} else {
+		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+		edma->hw_setup = m2p_hw_setup;
+		edma->hw_shutdown = m2p_hw_shutdown;
+		edma->hw_submit = m2p_hw_submit;
+		edma->hw_interrupt = m2p_hw_interrupt;
+	}
+
+	ret = dma_async_device_register(dma_dev);
+	if (unlikely(ret)) {
+		for (i = 0; i < edma->num_channels; i++) {
+			struct ep93xx_dma_chan *edmac = &edma->channels[i];
+			if (!IS_ERR_OR_NULL(edmac->clk))
+				clk_put(edmac->clk);
+		}
+		kfree(edma);
+	} else {
+		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+			 edma->m2m ? "M" : "P");
+	}
+
+	return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+	{ "ep93xx-dma-m2p", 0 },
+	{ "ep93xx-dma-m2m", 1 },
+	{ },
+};
+
+static struct platform_driver ep93xx_dma_driver = {
+	.driver		= {
+		.name	= "ep93xx-dma",
+	},
+	.id_table	= ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/dma/mmp_pdma.c linux-3.0.101.xm510/drivers/dma/mmp_pdma.c
--- linux-3.0.101/drivers/dma/mmp_pdma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/mmp_pdma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,874 @@
+/*
+ * Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/platform_data/mmp_dma.h>
+#include <linux/dmapool.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+#include "dmaengine.h"
+
+#define DCSR		0x0000
+#define DALGN		0x00a0
+#define DINT		0x00f0
+#define DDADR		0x0200
+#define DSADR		0x0204
+#define DTADR		0x0208
+#define DCMD		0x020c
+
+#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
+#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
+#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
+#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
+#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
+#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
+#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
+#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
+#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */
+
+#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
+#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
+
+#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
+#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
+
+#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
+#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
+#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
+#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
+#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
+#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
+#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
+#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
+#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
+#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
+#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
+#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
+#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
+
+#define PDMA_ALIGNMENT		3
+#define PDMA_MAX_DESC_BYTES	0x1000
+
+struct mmp_pdma_desc_hw {
+	u32 ddadr;	/* Points to the next descriptor + flags */
+	u32 dsadr;	/* DSADR value for the current transfer */
+	u32 dtadr;	/* DTADR value for the current transfer */
+	u32 dcmd;	/* DCMD value for the current transfer */
+} __aligned(32);
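These four words mirror the controller's in-memory descriptor format: once started, the hardware fetches the next descriptor from ddadr on its own. A small sketch of chaining two of them, the same pattern the prep and append code below uses (the function name is hypothetical):

static void example_chain(struct mmp_pdma_desc_hw *d0,
			  struct mmp_pdma_desc_hw *d1, dma_addr_t d1_phys)
{
	d0->ddadr = d1_phys;		/* hardware fetches d1 after d0 */
	d0->dcmd &= ~DCMD_ENDIRQEN;	/* no IRQ in the middle of the chain */

	d1->ddadr = DDADR_STOP;		/* end of chain */
	d1->dcmd |= DCMD_ENDIRQEN;	/* one IRQ when the queue drains */
}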
+
+struct mmp_pdma_desc_sw {
+	struct mmp_pdma_desc_hw desc;
+	struct list_head node;
+	struct list_head tx_list;
+	struct dma_async_tx_descriptor async_tx;
+};
+
+struct mmp_pdma_phy;
+
+struct mmp_pdma_chan {
+	struct device *dev;
+	struct dma_chan chan;
+	struct dma_async_tx_descriptor desc;
+	struct mmp_pdma_phy *phy;
+	enum dma_transfer_direction dir;
+
+	/* channel's basic info */
+	struct tasklet_struct tasklet;
+	u32 dcmd;
+	u32 drcmr;
+	u32 dev_addr;
+
+	/* list for desc */
+	spinlock_t desc_lock;		/* Descriptor list lock */
+	struct list_head chain_pending;	/* Link descriptors queue for pending */
+	struct list_head chain_running;	/* Link descriptors queue for running */
+	bool idle;			/* channel state machine */
+
+	struct dma_pool *desc_pool;	/* Descriptors pool */
+};
+
+struct mmp_pdma_phy {
+	int idx;
+	void __iomem *base;
+	struct mmp_pdma_chan *vchan;
+};
+
+struct mmp_pdma_device {
+	int dma_channels;
+	void __iomem *base;
+	struct device *dev;
+	struct dma_device device;
+	struct mmp_pdma_phy *phy;
+};
+
+#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+
+static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+	u32 reg = (phy->idx << 4) + DDADR;
+
+	writel(addr, phy->base + reg);
+}
+
+static void enable_chan(struct mmp_pdma_phy *phy)
+{
+	u32 reg;
+
+	if (!phy->vchan)
+		return;
+
+	reg = phy->vchan->drcmr;
+	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+
+	reg = (phy->idx << 2) + DCSR;
+	writel(readl(phy->base + reg) | DCSR_RUN,
+		phy->base + reg);
+}
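Worked through, the DRCMR offset arithmetic above maps a peripheral request line to a register address, for example:

/*
 * request line  5 -> 0x0100 + ((5 & 0x3f) << 2)  = 0x0114
 * request line 70 -> 0x1100 + ((70 & 0x3f) << 2) = 0x1118
 *
 * Writing DRCMR_MAPVLD | phy->idx at that offset binds the request
 * line to this physical channel before DCSR_RUN starts it.
 */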
+
+static void disable_chan(struct mmp_pdma_phy *phy)
+{
+	u32 reg;
+
+	if (phy) {
+		reg = (phy->idx << 2) + DCSR;
+		writel(readl(phy->base + reg) & ~DCSR_RUN,
+			phy->base + reg);
+	}
+}
+
+static int clear_chan_irq(struct mmp_pdma_phy *phy)
+{
+	u32 dcsr;
+	u32 dint = readl(phy->base + DINT);
+	u32 reg = (phy->idx << 2) + DCSR;
+
+	if (dint & BIT(phy->idx)) {
+		/* clear irq */
+		dcsr = readl(phy->base + reg);
+		writel(dcsr, phy->base + reg);
+		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
+{
+	struct mmp_pdma_phy *phy = dev_id;
+
+	if (clear_chan_irq(phy) == 0) {
+		tasklet_schedule(&phy->vchan->tasklet);
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
+}
+
+static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
+{
+	struct mmp_pdma_device *pdev = dev_id;
+	struct mmp_pdma_phy *phy;
+	u32 dint = readl(pdev->base + DINT);
+	int i, ret;
+	int irq_num = 0;
+
+	while (dint) {
+		i = __ffs(dint);
+		dint &= (dint - 1);
+		phy = &pdev->phy[i];
+		ret = mmp_pdma_chan_handler(irq, phy);
+		if (ret == IRQ_HANDLED)
+			irq_num++;
+	}
+
+	if (irq_num)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
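The demux loop above peels one set bit off DINT per iteration; for example, with dint = 0x14 (channels 2 and 4 pending):

/*
 * __ffs(0x14) = 2, then dint &= dint - 1 leaves 0x10;
 * __ffs(0x10) = 4, then dint &= dint - 1 leaves 0;
 * so exactly channels 2 and 4 are handed to mmp_pdma_chan_handler().
 */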
+
+/* lookup free phy channel as descending priority */
+static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
+{
+	int prio, i;
+	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+	struct mmp_pdma_phy *phy;
+
+	/*
+	 * dma channel priorities
+	 * ch 0 - 3,  16 - 19  <--> (0)
+	 * ch 4 - 7,  20 - 23  <--> (1)
+	 * ch 8 - 11, 24 - 27  <--> (2)
+	 * ch 12 - 15, 28 - 31  <--> (3)
+	 */
+	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+		for (i = 0; i < pdev->dma_channels; i++) {
+			if (prio != ((i & 0xf) >> 2))
+				continue;
+			phy = &pdev->phy[i];
+			if (!phy->vchan) {
+				phy->vchan = pchan;
+				return phy;
+			}
+		}
+	}
+
+	return NULL;
+}
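The mapping in the comment above falls out of prio = (i & 0xf) >> 2, so lower-numbered priority groups are searched first; for instance:

/*
 * channel  6 -> (6 & 0xf) >> 2  = 1   (group "4 - 7, 20 - 23")
 * channel 25 -> (25 & 0xf) >> 2 = 2   (group "8 - 11, 24 - 27")
 */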
+
+/* desc->tx_list ==> pending list */
+static void append_pending_queue(struct mmp_pdma_chan *chan,
+				 struct mmp_pdma_desc_sw *desc)
+{
+	struct mmp_pdma_desc_sw *tail =
+		to_mmp_pdma_desc(chan->chain_pending.prev);
+
+	if (list_empty(&chan->chain_pending))
+		goto out_splice;
+
+	/* one irq per queue, even appended */
+	tail->desc.ddadr = desc->async_tx.phys;
+	tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+
+	/* softly link to pending list */
+out_splice:
+	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+}
+
+/**
+ * start_pending_queue - transfer any pending transactions
+ * pending list ==> running list
+ */
+static void start_pending_queue(struct mmp_pdma_chan *chan)
+{
+	struct mmp_pdma_desc_sw *desc;
+
+	/* still in running, irq will start the pending list */
+	if (!chan->idle) {
+		dev_dbg(chan->dev, "DMA controller still busy\n");
+		return;
+	}
+
+	if (list_empty(&chan->chain_pending)) {
+		/* chance to re-fetch phy channel with higher prio */
+		if (chan->phy) {
+			chan->phy->vchan = NULL;
+			chan->phy = NULL;
+		}
+		dev_dbg(chan->dev, "no pending list\n");
+		return;
+	}
+
+	if (!chan->phy) {
+		chan->phy = lookup_phy(chan);
+		if (!chan->phy) {
+			dev_dbg(chan->dev, "no free dma channel\n");
+			return;
+		}
+	}
+
+	/*
+	 * pending -> running
+	 * reinitialize pending list
+	 */
+	desc = list_first_entry(&chan->chain_pending,
+				struct mmp_pdma_desc_sw, node);
+	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
+
+	/*
+	 * Program the descriptor's address into the DMA controller,
+	 * then start the DMA transaction
+	 */
+	set_desc(chan->phy, desc->async_tx.phys);
+	enable_chan(chan->phy);
+	chan->idle = false;
+}
+
+
+/* desc->tx_list ==> pending list */
+static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
+	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
+	struct mmp_pdma_desc_sw *child;
+	unsigned long flags;
+	dma_cookie_t cookie = -EBUSY;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	list_for_each_entry(child, &desc->tx_list, node) {
+		cookie = dma_cookie_assign(&child->async_tx);
+	}
+
+	append_pending_queue(chan, desc);
+
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	return cookie;
+}
+
+static struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+{
+	struct mmp_pdma_desc_sw *desc;
+	dma_addr_t pdesc;
+
+	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+	if (!desc) {
+		dev_err(chan->dev, "out of memory for link descriptor\n");
+		return NULL;
+	}
+
+	memset(desc, 0, sizeof(*desc));
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+	/* each desc has submit */
+	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
+	desc->async_tx.phys = pdesc;
+
+	return desc;
+}
+
+/**
+ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
+ *
+ * This function will create a dma pool for descriptor allocation.
+ * Request irq only when channel is requested
+ * Return - The number of allocated descriptors.
+ */
+
+static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+	if (chan->desc_pool)
+		return 1;
+
+	chan->desc_pool =
+		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
+				sizeof(struct mmp_pdma_desc_sw),
+				__alignof__(struct mmp_pdma_desc_sw), 0);
+	if (!chan->desc_pool) {
+		dev_err(chan->dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
+	if (chan->phy) {
+		chan->phy->vchan = NULL;
+		chan->phy = NULL;
+	}
+	chan->idle = true;
+	chan->dev_addr = 0;
+	return 1;
+}
+
+static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
+				    struct list_head *list)
+{
+	struct mmp_pdma_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+	mmp_pdma_free_desc_list(chan, &chan->chain_running);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
+	chan->idle = true;
+	chan->dev_addr = 0;
+	if (chan->phy) {
+		chan->phy->vchan = NULL;
+		chan->phy = NULL;
+	}
+	return;
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_memcpy(struct dma_chan *dchan,
+	dma_addr_t dma_dst, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct mmp_pdma_chan *chan;
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+	size_t copy = 0;
+
+	if (!dchan)
+		return NULL;
+
+	if (!len)
+		return NULL;
+
+	chan = to_mmp_pdma_chan(dchan);
+
+	if (!chan->dir) {
+		chan->dir = DMA_MEM_TO_MEM;
+		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
+		chan->dcmd |= DCMD_BURST32;
+	}
+
+	do {
+		/* Allocate the link descriptor from DMA pool */
+		new = mmp_pdma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, "no memory for desc\n");
+			goto fail;
+		}
+
+		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+
+		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
+		new->desc.dsadr = dma_src;
+		new->desc.dtadr = dma_dst;
+
+		if (!first)
+			first = new;
+		else
+			prev->desc.ddadr = new->async_tx.phys;
+
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= copy;
+
+		if (chan->dir == DMA_MEM_TO_DEV) {
+			dma_src += copy;
+		} else if (chan->dir == DMA_DEV_TO_MEM) {
+			dma_dst += copy;
+		} else if (chan->dir == DMA_MEM_TO_MEM) {
+			dma_src += copy;
+			dma_dst += copy;
+		}
+
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	first->async_tx.flags = flags; /* client is in control of this ack */
+	first->async_tx.cookie = -EBUSY;
+
+	/* last desc and fire IRQ */
+	new->desc.ddadr = DDADR_STOP;
+	new->desc.dcmd |= DCMD_ENDIRQEN;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
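Concretely, the splitting loop above caps each descriptor at PDMA_MAX_DESC_BYTES (0x1000); a 10000-byte copy, say, becomes three chained descriptors:

/*
 * copy = min(10000, 4096) -> 4096   (len 5904 left)
 * copy = min(5904, 4096)  -> 4096   (len 1808 left)
 * copy = min(1808, 4096)  -> 1808   (len 0, loop ends)
 *
 * Only the 1808-byte tail gets DDADR_STOP in ddadr and DCMD_ENDIRQEN
 * in dcmd, so one interrupt fires when the whole chain drains.
 */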
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+		       unsigned int sg_len, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+	size_t len, avail;
+	struct scatterlist *sg;
+	dma_addr_t addr;
+	int i;
+
+	if ((sgl == NULL) || (sg_len == 0))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+
+		do {
+			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+
+			/* allocate and populate the descriptor */
+			new = mmp_pdma_alloc_descriptor(chan);
+			if (!new) {
+				dev_err(chan->dev, "no memory for desc\n");
+				goto fail;
+			}
+
+			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
+			if (dir == DMA_MEM_TO_DEV) {
+				new->desc.dsadr = addr;
+				new->desc.dtadr = chan->dev_addr;
+			} else {
+				new->desc.dsadr = chan->dev_addr;
+				new->desc.dtadr = addr;
+			}
+
+			if (!first)
+				first = new;
+			else
+				prev->desc.ddadr = new->async_tx.phys;
+
+			new->async_tx.cookie = 0;
+			async_tx_ack(&new->async_tx);
+			prev = new;
+
+			/* Insert the link descriptor to the LD ring */
+			list_add_tail(&new->node, &first->tx_list);
+
+			/* update metadata */
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	first->async_tx.cookie = -EBUSY;
+	first->async_tx.flags = flags;
+
+	/* last desc and fire IRQ */
+	new->desc.ddadr = DDADR_STOP;
+	new->desc.dcmd |= DCMD_ENDIRQEN;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
+static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	struct dma_slave_config *cfg = (void *)arg;
+	unsigned long flags;
+	int ret = 0;
+	u32 maxburst = 0, addr = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (!dchan)
+		return -EINVAL;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		disable_chan(chan->phy);
+		if (chan->phy) {
+			chan->phy->vchan = NULL;
+			chan->phy = NULL;
+		}
+		spin_lock_irqsave(&chan->desc_lock, flags);
+		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+		mmp_pdma_free_desc_list(chan, &chan->chain_running);
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+		chan->idle = true;
+		break;
+	case DMA_SLAVE_CONFIG:
+		if (cfg->direction == DMA_DEV_TO_MEM) {
+			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+			maxburst = cfg->src_maxburst;
+			width = cfg->src_addr_width;
+			addr = cfg->src_addr;
+		} else if (cfg->direction == DMA_MEM_TO_DEV) {
+			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+			maxburst = cfg->dst_maxburst;
+			width = cfg->dst_addr_width;
+			addr = cfg->dst_addr;
+		}
+
+		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			chan->dcmd |= DCMD_WIDTH1;
+		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+			chan->dcmd |= DCMD_WIDTH2;
+		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+			chan->dcmd |= DCMD_WIDTH4;
+
+		if (maxburst == 8)
+			chan->dcmd |= DCMD_BURST8;
+		else if (maxburst == 16)
+			chan->dcmd |= DCMD_BURST16;
+		else if (maxburst == 32)
+			chan->dcmd |= DCMD_BURST32;
+
+		chan->dir = cfg->direction;
+		chan->drcmr = cfg->slave_id;
+		chan->dev_addr = addr;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return ret;
+}
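In this kernel both slave configuration and termination funnel through the device_control() hook above, usually via the generic wrappers. A hypothetical client sequence:

static int example_control(struct dma_chan *chan,
			   struct dma_slave_config *cfg)
{
	int ret;

	/* routed to mmp_pdma_control(chan, DMA_SLAVE_CONFIG, ...) */
	ret = dmaengine_slave_config(chan, cfg);
	if (ret)
		return ret;

	/* ... submit and run transfers, then stop everything: */
	return dmaengine_terminate_all(chan);	/* DMA_TERMINATE_ALL */
}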
+
|
||
+static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
|
||
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
|
||
+{
|
||
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
|
||
+ enum dma_status ret;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&chan->desc_lock, flags);
|
||
+ ret = dma_cookie_status(dchan, cookie, txstate);
|
||
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+/**
|
||
+ * mmp_pdma_issue_pending - Issue the DMA start command
|
||
+ * pending list ==> running list
|
||
+ */
|
||
+static void mmp_pdma_issue_pending(struct dma_chan *dchan)
|
||
+{
|
||
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&chan->desc_lock, flags);
|
||
+ start_pending_queue(chan);
|
||
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
|
||
+}
|
||
+
|
||
+/*
|
||
+ * dma_do_tasklet
|
||
+ * Do call back
|
||
+ * Start pending list
|
||
+ */
|
||
+static void dma_do_tasklet(unsigned long data)
|
||
+{
|
||
+ struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
|
||
+ struct mmp_pdma_desc_sw *desc, *_desc;
|
||
+ LIST_HEAD(chain_cleanup);
|
||
+ unsigned long flags;
|
||
+
|
||
+ /* submit pending list; callback for each desc; free desc */
|
||
+
|
||
+ spin_lock_irqsave(&chan->desc_lock, flags);
|
||
+
|
||
+ /* update the cookie if we have some descriptors to cleanup */
|
||
+ if (!list_empty(&chan->chain_running)) {
|
||
+ dma_cookie_t cookie;
|
||
+
|
||
+ desc = to_mmp_pdma_desc(chan->chain_running.prev);
|
||
+ cookie = desc->async_tx.cookie;
|
||
+ dma_cookie_complete(&desc->async_tx);
|
||
+
+		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->chain_running, &chain_cleanup);
+
+	/* the hardware is now idle and ready for more */
+	chan->idle = true;
+
+	/* Start any pending transactions automatically */
+	start_pending_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
+		struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+		/* Run the link descriptor callback function */
+		if (txd->callback)
+			txd->callback(txd->callback_param);
+
+		dma_pool_free(chan->desc_pool, desc, txd->phys);
+	}
+}
+
+static int mmp_pdma_remove(struct platform_device *op)
+{
+	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+
+	dma_async_device_unregister(&pdev->device);
+	return 0;
+}
+
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
+					int idx, int irq)
+{
+	struct mmp_pdma_phy *phy = &pdev->phy[idx];
+	struct mmp_pdma_chan *chan;
+	int ret;
+
+	chan = devm_kzalloc(pdev->dev,
+			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	if (chan == NULL)
+		return -ENOMEM;
+
+	phy->idx = idx;
+	phy->base = pdev->base;
+
+	if (irq) {
+		ret = devm_request_irq(pdev->dev, irq,
+			mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+		if (ret) {
+			dev_err(pdev->dev, "channel request irq fail!\n");
+			return ret;
+		}
+	}
+
+	spin_lock_init(&chan->desc_lock);
+	chan->dev = pdev->dev;
+	chan->chan.device = &pdev->device;
+	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+	INIT_LIST_HEAD(&chan->chain_pending);
+	INIT_LIST_HEAD(&chan->chain_running);
+
+	/* register virt channel to dma engine */
+	list_add_tail(&chan->chan.device_node,
+			&pdev->device.channels);
+
+	return 0;
+}
+
+static struct of_device_id mmp_pdma_dt_ids[] = {
+	{ .compatible = "marvell,pdma-1.0", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+
+static int mmp_pdma_probe(struct platform_device *op)
+{
+	struct mmp_pdma_device *pdev;
+	const struct of_device_id *of_id;
+	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+	struct resource *iores;
+	int i, ret, irq = 0;
+	int dma_channels = 0, irq_num = 0;
+
+	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
+	if (!pdev)
+		return -ENOMEM;
+	pdev->dev = &op->dev;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	pdev->base = devm_ioremap_resource(pdev->dev, iores);
+	if (IS_ERR(pdev->base))
+		return PTR_ERR(pdev->base);
+
+	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
+	if (of_id)
+		of_property_read_u32(pdev->dev->of_node,
+				"#dma-channels", &dma_channels);
+	else if (pdata && pdata->dma_channels)
+		dma_channels = pdata->dma_channels;
+	else
+		dma_channels = 32;	/* default 32 channel */
+	pdev->dma_channels = dma_channels;
+
+	for (i = 0; i < dma_channels; i++) {
+		if (platform_get_irq(op, i) > 0)
+			irq_num++;
+	}
+
+	pdev->phy = devm_kzalloc(pdev->dev,
+		dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	if (pdev->phy == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pdev->device.channels);
+
+	if (irq_num != dma_channels) {
+		/* all chan share one irq, demux inside */
+		irq = platform_get_irq(op, 0);
+		ret = devm_request_irq(pdev->dev, irq,
+			mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < dma_channels; i++) {
+		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
+		ret = mmp_pdma_chan_init(pdev, i, irq);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+	pdev->device.dev = &op->dev;
+	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
+	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
+	pdev->device.device_tx_status = mmp_pdma_tx_status;
+	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
+	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
+	pdev->device.device_control = mmp_pdma_control;
+	pdev->device.copy_align = PDMA_ALIGNMENT;
+
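+	/*
+	 * Prefer the coherent DMA mask the platform already set up; only
+	 * fall back to a full 64-bit streaming mask when none was provided.
+	 */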
+	if (pdev->dev->coherent_dma_mask)
+		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
+	else
+		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+
+	ret = dma_async_device_register(&pdev->device);
+	if (ret) {
+		dev_err(pdev->device.dev, "unable to register\n");
+		return ret;
+	}
+
+	dev_info(pdev->device.dev, "initialized\n");
+	return 0;
+}
+
+static const struct platform_device_id mmp_pdma_id_table[] = {
+	{ "mmp-pdma", },
+	{ },
+};
+
+static struct platform_driver mmp_pdma_driver = {
+	.driver		= {
+		.name	= "mmp-pdma",
+		.owner  = THIS_MODULE,
+		.of_match_table = mmp_pdma_dt_ids,
+	},
+	.id_table	= mmp_pdma_id_table,
+	.probe		= mmp_pdma_probe,
+	.remove		= mmp_pdma_remove,
+};
+
+module_platform_driver(mmp_pdma_driver);
+
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff -urN linux-3.0.101/drivers/dma/mmp_tdma.c linux-3.0.101.xm510/drivers/dma/mmp_tdma.c
--- linux-3.0.101/drivers/dma/mmp_tdma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/mmp_tdma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,626 @@
+/*
+ * Driver For Marvell Two-channel DMA Engine
+ *
+ * Copyright: Marvell International Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <mach/regs-icu.h>
+#include <linux/platform_data/dma-mmp_tdma.h>
+#include <linux/of_device.h>
+
+#include "dmaengine.h"
+
+/*
+ * Two-Channel DMA registers
+ */
+#define TDBCR		0x00	/* Byte Count */
+#define TDSAR		0x10	/* Src Addr */
+#define TDDAR		0x20	/* Dst Addr */
+#define TDNDPR		0x30	/* Next Desc */
+#define TDCR		0x40	/* Control */
+#define TDCP		0x60	/* Priority */
+#define TDCDPR		0x70	/* Current Desc */
+#define TDIMR		0x80	/* Int Mask */
+#define TDISR		0xa0	/* Int Status */
+
+/* Two-Channel DMA Control Register */
+#define TDCR_SSZ_8_BITS		(0x0 << 22)	/* Sample Size */
+#define TDCR_SSZ_12_BITS	(0x1 << 22)
+#define TDCR_SSZ_16_BITS	(0x2 << 22)
+#define TDCR_SSZ_20_BITS	(0x3 << 22)
+#define TDCR_SSZ_24_BITS	(0x4 << 22)
+#define TDCR_SSZ_32_BITS	(0x5 << 22)
+#define TDCR_SSZ_SHIFT		(0x1 << 22)
+#define TDCR_SSZ_MASK		(0x7 << 22)
+#define TDCR_SSPMOD		(0x1 << 21)	/* SSP MOD */
+#define TDCR_ABR		(0x1 << 20)	/* Channel Abort */
+#define TDCR_CDE		(0x1 << 17)	/* Close Desc Enable */
+#define TDCR_PACKMOD		(0x1 << 16)	/* Pack Mode (ADMA Only) */
+#define TDCR_CHANACT		(0x1 << 14)	/* Channel Active */
+#define TDCR_FETCHND		(0x1 << 13)	/* Fetch Next Desc */
+#define TDCR_CHANEN		(0x1 << 12)	/* Channel Enable */
+#define TDCR_INTMODE		(0x1 << 10)	/* Interrupt Mode */
+#define TDCR_CHAINMOD		(0x1 << 9)	/* Chain Mode */
+#define TDCR_BURSTSZ_MSK	(0x7 << 6)	/* Burst Size */
+#define TDCR_BURSTSZ_4B		(0x0 << 6)
+#define TDCR_BURSTSZ_8B		(0x1 << 6)
+#define TDCR_BURSTSZ_16B	(0x3 << 6)
+#define TDCR_BURSTSZ_32B	(0x6 << 6)
+#define TDCR_BURSTSZ_64B	(0x7 << 6)
+#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
+#define TDCR_BURSTSZ_128B	(0x5 << 6)
+#define TDCR_DSTDIR_MSK		(0x3 << 4)	/* Dst Direction */
+#define TDCR_DSTDIR_ADDR_HOLD	(0x2 << 4)	/* Dst Addr Hold */
+#define TDCR_DSTDIR_ADDR_INC	(0x0 << 4)	/* Dst Addr Increment */
+#define TDCR_SRCDIR_MSK		(0x3 << 2)	/* Src Direction */
+#define TDCR_SRCDIR_ADDR_HOLD	(0x2 << 2)	/* Src Addr Hold */
+#define TDCR_SRCDIR_ADDR_INC	(0x0 << 2)	/* Src Addr Increment */
+#define TDCR_DSTDESCCONT	(0x1 << 1)
+#define TDCR_SRCDESTCONT	(0x1 << 0)
+
+/* Two-Channel DMA Int Mask Register */
+#define TDIMR_COMP		(0x1 << 0)
+
+/* Two-Channel DMA Int Status Register */
+#define TDISR_COMP		(0x1 << 0)
+
+/*
+ * Two-Channel DMA Descriptor Struct
+ * NOTE: desc's buf must be aligned to 16 bytes.
+ */
+struct mmp_tdma_desc {
+	u32 byte_cnt;
+	u32 src_addr;
+	u32 dst_addr;
+	u32 nxt_desc;
+};
+
+enum mmp_tdma_type {
+	MMP_AUD_TDMA = 0,
+	PXA910_SQU,
+};
+
+#define TDMA_ALIGNMENT		3
+#define TDMA_MAX_XFER_BYTES	SZ_64K
+
+struct mmp_tdma_chan {
+	struct device			*dev;
+	struct dma_chan			chan;
+	struct dma_async_tx_descriptor	desc;
+	struct tasklet_struct		tasklet;
+
+	struct mmp_tdma_desc		*desc_arr;
+	phys_addr_t			desc_arr_phys;
+	int				desc_num;
+	enum dma_transfer_direction	dir;
+	dma_addr_t			dev_addr;
+	u32				burst_sz;
+	enum dma_slave_buswidth		buswidth;
+	enum dma_status			status;
+
+	int				idx;
+	enum mmp_tdma_type		type;
+	int				irq;
+	unsigned long			reg_base;
+
+	size_t				buf_len;
+	size_t				period_len;
+	size_t				pos;
+};
+
+#define TDMA_CHANNEL_NUM 2
+struct mmp_tdma_device {
+	struct device			*dev;
+	void __iomem			*base;
+	struct dma_device		device;
+	struct mmp_tdma_chan		*tdmac[TDMA_CHANNEL_NUM];
+};
+
+#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+
+static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
+{
+	writel(phys, tdmac->reg_base + TDNDPR);
+	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
+					tdmac->reg_base + TDCR);
+}
+
+static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
+{
+	/* enable irq */
+	writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
+	/* enable dma chan */
+	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
+					tdmac->reg_base + TDCR);
+	tdmac->status = DMA_IN_PROGRESS;
+}
+
+static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
+{
+	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
+					tdmac->reg_base + TDCR);
+
+	/* disable irq */
+	writel(0, tdmac->reg_base + TDIMR);
+
+	tdmac->status = DMA_SUCCESS;
+}
+
+static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
+{
+	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
+					tdmac->reg_base + TDCR);
+	tdmac->status = DMA_IN_PROGRESS;
+}
+
+static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
+{
+	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
+					tdmac->reg_base + TDCR);
+	tdmac->status = DMA_PAUSED;
+}
+
+static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
+{
+	unsigned int tdcr;
+
+	mmp_tdma_disable_chan(tdmac);
+
+	if (tdmac->dir == DMA_MEM_TO_DEV)
+		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
+	else if (tdmac->dir == DMA_DEV_TO_MEM)
+		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;
+
+	if (tdmac->type == MMP_AUD_TDMA) {
+		tdcr |= TDCR_PACKMOD;
+
+		switch (tdmac->burst_sz) {
+		case 4:
+			tdcr |= TDCR_BURSTSZ_4B;
+			break;
+		case 8:
+			tdcr |= TDCR_BURSTSZ_8B;
+			break;
+		case 16:
+			tdcr |= TDCR_BURSTSZ_16B;
+			break;
+		case 32:
+			tdcr |= TDCR_BURSTSZ_32B;
+			break;
+		case 64:
+			tdcr |= TDCR_BURSTSZ_64B;
+			break;
+		case 128:
+			tdcr |= TDCR_BURSTSZ_128B;
+			break;
+		default:
+			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			return -EINVAL;
+		}
+
+		switch (tdmac->buswidth) {
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			tdcr |= TDCR_SSZ_8_BITS;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			tdcr |= TDCR_SSZ_16_BITS;
+			break;
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
+			tdcr |= TDCR_SSZ_32_BITS;
+			break;
+		default:
+			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+			return -EINVAL;
+		}
+	} else if (tdmac->type == PXA910_SQU) {
+		tdcr |= TDCR_BURSTSZ_SQU_32B;
+		tdcr |= TDCR_SSPMOD;
+	}
+
+	writel(tdcr, tdmac->reg_base + TDCR);
+	return 0;
+}
+
+static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
+{
+	u32 reg = readl(tdmac->reg_base + TDISR);
+
+	if (reg & TDISR_COMP) {
+		/* clear irq */
+		reg &= ~TDISR_COMP;
+		writel(reg, tdmac->reg_base + TDISR);
+
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
+{
+	struct mmp_tdma_chan *tdmac = dev_id;
+
+	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
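+		/*
+		 * One period has completed: advance the ring-buffer position
+		 * and wrap around at buf_len before notifying the client.
+		 */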
+		tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
+		tasklet_schedule(&tdmac->tasklet);
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
+}
+
+static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
+{
+	struct mmp_tdma_device *tdev = dev_id;
+	int i, ret;
+	int irq_num = 0;
+
+	for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
+		struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
+
+		ret = mmp_tdma_chan_handler(irq, tdmac);
+		if (ret == IRQ_HANDLED)
+			irq_num++;
+	}
+
+	if (irq_num)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+static void dma_do_tasklet(unsigned long data)
+{
+	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
+
+	if (tdmac->desc.callback)
+		tdmac->desc.callback(tdmac->desc.callback_param);
+
+}
+
+static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
+{
+	struct gen_pool *gpool;
+	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
+
+	gpool = sram_get_gpool("asram");
+	if (tdmac->desc_arr)
+		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
+				size);
+	tdmac->desc_arr = NULL;
+
+	return;
+}
+
+static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);
+
+	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);
+
+	return 0;
+}
+
+static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	int ret;
+
+	dma_async_tx_descriptor_init(&tdmac->desc, chan);
+	tdmac->desc.tx_submit = mmp_tdma_tx_submit;
+
+	if (tdmac->irq) {
+		ret = devm_request_irq(tdmac->dev, tdmac->irq,
+			mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+		if (ret)
+			return ret;
+	}
+	return 1;
+}
+
+static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+	if (tdmac->irq)
+		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
+	mmp_tdma_free_descriptor(tdmac);
+	return;
+}
+
+struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+{
+	struct gen_pool *gpool;
+	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
+
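+	/*
+	 * Descriptors live in on-chip audio SRAM handed out by the
+	 * "asram" gen_pool, not in ordinary DDR.
+	 */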
+	gpool = sram_get_gpool("asram");
+	if (!gpool)
+		return NULL;
+
+	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
+	if (!tdmac->desc_arr)
+		return NULL;
+
+	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
+			(unsigned long)tdmac->desc_arr);
+
+	return tdmac->desc_arr;
+}
+
+static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	struct mmp_tdma_desc *desc;
+	int num_periods = buf_len / period_len;
+	int i = 0, buf = 0;
+
+	if (tdmac->status != DMA_SUCCESS)
+		return NULL;
+
+	if (period_len > TDMA_MAX_XFER_BYTES) {
+		dev_err(tdmac->dev,
+				"maximum period size exceeded: %zu > %d\n",
+				period_len, TDMA_MAX_XFER_BYTES);
+		goto err_out;
+	}
+
+	tdmac->status = DMA_IN_PROGRESS;
+	tdmac->desc_num = num_periods;
+	desc = mmp_tdma_alloc_descriptor(tdmac);
+	if (!desc)
+		goto err_out;
+
+	while (buf < buf_len) {
+		desc = &tdmac->desc_arr[i];
+
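+		/*
+		 * Chain the descriptors into a ring: the last entry points
+		 * back to the first, so the transfer repeats until the
+		 * channel is disabled.
+		 */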
+		if (i + 1 == num_periods)
+			desc->nxt_desc = tdmac->desc_arr_phys;
+		else
+			desc->nxt_desc = tdmac->desc_arr_phys +
+				sizeof(*desc) * (i + 1);
+
+		if (direction == DMA_MEM_TO_DEV) {
+			desc->src_addr = dma_addr;
+			desc->dst_addr = tdmac->dev_addr;
+		} else {
+			desc->src_addr = tdmac->dev_addr;
+			desc->dst_addr = dma_addr;
+		}
+		desc->byte_cnt = period_len;
+		dma_addr += period_len;
+		buf += period_len;
+		i++;
+	}
+
+	tdmac->buf_len = buf_len;
+	tdmac->period_len = period_len;
+	tdmac->pos = 0;
+
+	return &tdmac->desc;
+
+err_out:
+	tdmac->status = DMA_ERROR;
+	return NULL;
+}
+
+static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	struct dma_slave_config *dmaengine_cfg = (void *)arg;
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		mmp_tdma_disable_chan(tdmac);
+		break;
+	case DMA_PAUSE:
+		mmp_tdma_pause_chan(tdmac);
+		break;
+	case DMA_RESUME:
+		mmp_tdma_resume_chan(tdmac);
+		break;
+	case DMA_SLAVE_CONFIG:
+		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+			tdmac->dev_addr = dmaengine_cfg->src_addr;
+			tdmac->burst_sz = dmaengine_cfg->src_maxburst;
+			tdmac->buswidth = dmaengine_cfg->src_addr_width;
+		} else {
+			tdmac->dev_addr = dmaengine_cfg->dst_addr;
+			tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
+			tdmac->buswidth = dmaengine_cfg->dst_addr_width;
+		}
+		tdmac->dir = dmaengine_cfg->direction;
+		return mmp_tdma_config_chan(tdmac);
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
+			dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+	dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+
+	return tdmac->status;
+}
+
+static void mmp_tdma_issue_pending(struct dma_chan *chan)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+	mmp_tdma_enable_chan(tdmac);
+}
+
+static int mmp_tdma_remove(struct platform_device *pdev)
+{
+	struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&tdev->device);
+	return 0;
+}
+
+static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
+						int idx, int irq, int type)
+{
+	struct mmp_tdma_chan *tdmac;
+
+	if (idx >= TDMA_CHANNEL_NUM) {
+		dev_err(tdev->dev, "too many channels for device!\n");
+		return -EINVAL;
+	}
+
+	/* alloc channel */
+	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
+	if (!tdmac) {
+		dev_err(tdev->dev, "no free memory for DMA channels!\n");
+		return -ENOMEM;
+	}
+	if (irq)
+		tdmac->irq = irq;
+	tdmac->dev	   = tdev->dev;
+	tdmac->chan.device = &tdev->device;
+	tdmac->idx	   = idx;
+	tdmac->type	   = type;
+	tdmac->reg_base	   = (unsigned long)tdev->base + idx * 4;
+	tdmac->status = DMA_SUCCESS;
+	tdev->tdmac[tdmac->idx] = tdmac;
+	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
+
+	/* add the channel to tdma_chan list */
+	list_add_tail(&tdmac->chan.device_node,
+			&tdev->device.channels);
+	return 0;
+}
+
+static struct of_device_id mmp_tdma_dt_ids[] = {
+	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
+	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
+
+static int mmp_tdma_probe(struct platform_device *pdev)
+{
+	enum mmp_tdma_type type;
+	const struct of_device_id *of_id;
+	struct mmp_tdma_device *tdev;
+	struct resource *iores;
+	int i, ret;
+	int irq = 0, irq_num = 0;
+	int chan_num = TDMA_CHANNEL_NUM;
+
+	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
+	if (of_id)
+		type = (enum mmp_tdma_type) of_id->data;
+	else
+		type = platform_get_device_id(pdev)->driver_data;
+
+	/* always have a couple of channels */
+	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+	if (!tdev)
+		return -ENOMEM;
+
+	tdev->dev = &pdev->dev;
+
+	for (i = 0; i < chan_num; i++) {
+		if (platform_get_irq(pdev, i) > 0)
+			irq_num++;
+	}
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(tdev->base))
+		return PTR_ERR(tdev->base);
+
+	INIT_LIST_HEAD(&tdev->device.channels);
+
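+	/*
+	 * Fewer interrupt lines than channels: request one shared irq here
+	 * and demultiplex it in mmp_tdma_int_handler().
+	 */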
+	if (irq_num != chan_num) {
+		irq = platform_get_irq(pdev, 0);
+		ret = devm_request_irq(&pdev->dev, irq,
+			mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+		if (ret)
+			return ret;
+	}
+
+	/* initialize channel parameters */
+	for (i = 0; i < chan_num; i++) {
+		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
+		ret = mmp_tdma_chan_init(tdev, i, irq, type);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
+	tdev->device.dev = &pdev->dev;
+	tdev->device.device_alloc_chan_resources =
+					mmp_tdma_alloc_chan_resources;
+	tdev->device.device_free_chan_resources =
+					mmp_tdma_free_chan_resources;
+	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
+	tdev->device.device_tx_status = mmp_tdma_tx_status;
+	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
+	tdev->device.device_control = mmp_tdma_control;
+	tdev->device.copy_align = TDMA_ALIGNMENT;
+
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	platform_set_drvdata(pdev, tdev);
+
+	ret = dma_async_device_register(&tdev->device);
+	if (ret) {
+		dev_err(tdev->device.dev, "unable to register\n");
+		return ret;
+	}
+
+	dev_info(tdev->device.dev, "initialized\n");
+	return 0;
+}
+
+static const struct platform_device_id mmp_tdma_id_table[] = {
+	{ "mmp-adma", MMP_AUD_TDMA },
+	{ "pxa910-squ", PXA910_SQU },
+	{ },
+};
+
+static struct platform_driver mmp_tdma_driver = {
+	.driver		= {
+		.name	= "mmp-tdma",
+		.owner  = THIS_MODULE,
+		.of_match_table = mmp_tdma_dt_ids,
+	},
+	.id_table	= mmp_tdma_id_table,
+	.probe		= mmp_tdma_probe,
+	.remove		= mmp_tdma_remove,
+};
+
+module_platform_driver(mmp_tdma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
+MODULE_ALIAS("platform:mmp-tdma");
+MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
+MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
diff -urN linux-3.0.101/drivers/dma/of-dma.c linux-3.0.101.xm510/drivers/dma/of-dma.c
--- linux-3.0.101/drivers/dma/of-dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/of-dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,216 @@
+/*
+ * Device tree helpers for DMA request / controller
+ *
+ * Based on of_gpio.c
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+static LIST_HEAD(of_dma_list);
+static DEFINE_MUTEX(of_dma_lock);
+
+/**
+ * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ *
+ * Finds a DMA controller with matching device node and number for dma cells
+ * in a list of registered DMA controllers. If a match is found a valid pointer
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
+ * found.
+ */
+static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
+{
+	struct of_dma *ofdma;
+
+	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+		if (ofdma->of_node == dma_spec->np)
+			return ofdma;
+
+	pr_debug("%s: can't find DMA controller %s\n", __func__,
+		 dma_spec->np->full_name);
+
+	return NULL;
+}
+
+/**
+ * of_dma_controller_register - Register a DMA controller to DT DMA helpers
+ * @np:			device node of DMA controller
+ * @of_dma_xlate:	translation function which converts a phandle
+ *			arguments list into a dma_chan structure
+ * @data:		pointer to controller specific data to be used by
+ *			translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate of_dma_controller_free()
+ * call.
+ */
+int of_dma_controller_register(struct device_node *np,
+				struct dma_chan *(*of_dma_xlate)
+				(struct of_phandle_args *, struct of_dma *),
+				void *data)
+{
+	struct of_dma *ofdma;
+
+	if (!np || !of_dma_xlate) {
+		pr_err("%s: not enough information provided\n", __func__);
+		return -EINVAL;
+	}
+
+	ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
+	if (!ofdma)
+		return -ENOMEM;
+
+	ofdma->of_node = np;
+	ofdma->of_dma_xlate = of_dma_xlate;
+	ofdma->of_dma_data = data;
+
+	/* Now queue of_dma controller structure in list */
+	mutex_lock(&of_dma_lock);
+	list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
+	mutex_unlock(&of_dma_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_register);
+
+/**
+ * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
+ * @np:		device node of DMA controller
+ *
+ * Memory allocated by of_dma_controller_register() is freed here.
+ */
+void of_dma_controller_free(struct device_node *np)
+{
+	struct of_dma *ofdma;
+
+	mutex_lock(&of_dma_lock);
+
+	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+		if (ofdma->of_node == np) {
+			list_del(&ofdma->of_dma_controllers);
+			kfree(ofdma);
+			break;
+		}
+
+	mutex_unlock(&of_dma_lock);
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_free);
+
+/**
+ * of_dma_match_channel - Check if a DMA specifier matches name
+ * @np:		device node to look for DMA channels
+ * @name:	channel name to be matched
+ * @index:	index of DMA specifier in list of DMA specifiers
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ *
+ * Check if the DMA specifier pointed to by the index in a list of DMA
+ * specifiers, matches the name provided. Returns 0 if the name matches and
+ * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
+ */
+static int of_dma_match_channel(struct device_node *np, const char *name,
+				int index, struct of_phandle_args *dma_spec)
+{
+	const char *s;
+
+	if (of_property_read_string_index(np, "dma-names", index, &s))
+		return -ENODEV;
+
+	if (strcmp(name, s))
+		return -ENODEV;
+
+	if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+				       dma_spec))
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * of_dma_request_slave_channel - Get the DMA slave channel
+ * @np:		device node to get DMA request from
+ * @name:	name of desired channel
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+					      const char *name)
+{
+	struct of_phandle_args	dma_spec;
+	struct of_dma		*ofdma;
+	struct dma_chan		*chan;
+	int			count, i;
+
+	if (!np || !name) {
+		pr_err("%s: not enough information provided\n", __func__);
+		return NULL;
+	}
+
+	count = of_property_count_strings(np, "dma-names");
+	if (count < 0) {
+		pr_err("%s: dma-names property missing or empty\n", __func__);
+		return NULL;
+	}
+
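+	/*
+	 * Walk the "dma-names" entries; the first specifier whose controller
+	 * is registered and whose xlate callback yields a channel wins.
+	 */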
+	for (i = 0; i < count; i++) {
+		if (of_dma_match_channel(np, name, i, &dma_spec))
+			continue;
+
+		mutex_lock(&of_dma_lock);
+		ofdma = of_dma_find_controller(&dma_spec);
+
+		if (ofdma)
+			chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+		else
+			chan = NULL;
+
+		mutex_unlock(&of_dma_lock);
+
+		of_node_put(dma_spec.np);
+
+		if (chan)
+			return chan;
+	}
+
+	return NULL;
+}
+
+/**
+ * of_dma_simple_xlate - Simple DMA engine translation function
+ * @dma_spec:	pointer to DMA specifier as found in the device tree
+ * @of_dma:	pointer to DMA controller data
+ *
+ * A simple translation function for devices that use a 32-bit value for the
+ * filter_param when calling the DMA engine dma_request_channel() function.
+ * Note that this translation function requires that #dma-cells is equal to 1
+ * and the argument of the dma specifier is the 32-bit filter_param. Returns
+ * pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	int count = dma_spec->args_count;
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	if (count != 1)
+		return NULL;
+
+	return dma_request_channel(info->dma_cap, info->filter_fn,
+			&dma_spec->args[0]);
+}
+EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
diff -urN linux-3.0.101/drivers/dma/omap-dma.c linux-3.0.101.xm510/drivers/dma/omap-dma.c
--- linux-3.0.101/drivers/dma/omap-dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/omap-dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,719 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+
+#include "virt-dma.h"
+
+struct omap_dmadev {
+	struct dma_device ddev;
+	spinlock_t lock;
+	struct tasklet_struct task;
+	struct list_head pending;
+};
+
+struct omap_chan {
+	struct virt_dma_chan vc;
+	struct list_head node;
+
+	struct dma_slave_config	cfg;
+	unsigned dma_sig;
+	bool cyclic;
+	bool paused;
+
+	int dma_ch;
+	struct omap_desc *desc;
+	unsigned sgidx;
+};
+
+struct omap_sg {
+	dma_addr_t addr;
+	uint32_t en;		/* number of elements (24-bit) */
+	uint32_t fn;		/* number of frames (16-bit) */
+};
+
+struct omap_desc {
+	struct virt_dma_desc vd;
+	enum dma_transfer_direction dir;
+	dma_addr_t dev_addr;
+
+	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
+	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
+	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
+	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
+	uint8_t periph_port;	/* Peripheral port */
+
+	unsigned sglen;
+	struct omap_sg sg[0];
+};
+
+static const unsigned es_bytes[] = {
+	[OMAP_DMA_DATA_TYPE_S8] = 1,
+	[OMAP_DMA_DATA_TYPE_S16] = 2,
+	[OMAP_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct of_dma_filter_info omap_dma_info = {
+	.filter_fn = omap_dma_filter_fn,
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+	return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct omap_desc, vd));
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+	unsigned idx)
+{
+	struct omap_sg *sg = d->sg + idx;
+
+	if (d->dir == DMA_DEV_TO_MEM)
+		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+	else
+		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+
+	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
+		d->sync_mode, c->dma_sig, d->sync_type);
+
+	omap_start_dma(c->dma_ch);
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+	struct omap_desc *d;
+
+	if (!vd) {
+		c->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	c->desc = d = to_omap_dma_desc(&vd->tx);
+	c->sgidx = 0;
+
+	if (d->dir == DMA_DEV_TO_MEM)
+		omap_set_dma_src_params(c->dma_ch, d->periph_port,
+			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+	else
+		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
+			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+
+	omap_dma_start_sg(c, d, 0);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+	struct omap_chan *c = data;
+	struct omap_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	d = c->desc;
+	if (d) {
+		if (!c->cyclic) {
+			if (++c->sgidx < d->sglen) {
+				omap_dma_start_sg(c, d, c->sgidx);
+			} else {
+				omap_dma_start_desc(c);
+				vchan_cookie_complete(&d->vd);
+			}
+		} else {
+			vchan_cyclic_callback(&d->vd);
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+/*
+ * This callback schedules all pending channels. We could be more
+ * clever here by postponing allocation of the real DMA channels to
+ * this point, and freeing them when our virtual channel becomes idle.
+ *
+ * We would then need to deal with 'all channels in-use'
+ */
+static void omap_dma_sched(unsigned long data)
+{
+	struct omap_dmadev *d = (struct omap_dmadev *)data;
+	LIST_HEAD(head);
+
+	spin_lock_irq(&d->lock);
+	list_splice_tail_init(&d->pending, &head);
+	spin_unlock_irq(&d->lock);
+
+	while (!list_empty(&head)) {
+		struct omap_chan *c = list_first_entry(&head,
+			struct omap_chan, node);
+
+		spin_lock_irq(&c->vc.lock);
+		list_del_init(&c->node);
+		omap_dma_start_desc(c);
+		spin_unlock_irq(&c->vc.lock);
+	}
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
+	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+
+	return omap_request_dma(c->dma_sig, "DMA engine",
+		omap_dma_callback, c, &c->dma_ch);
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
+	vchan_free_chan_resources(&c->vc);
+	omap_free_dma(c->dma_ch);
+
+	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+}
+
+static size_t omap_dma_sg_size(struct omap_sg *sg)
+{
+	return sg->en * sg->fn;
+}
+
+static size_t omap_dma_desc_size(struct omap_desc *d)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->sglen; i++)
+		size += omap_dma_sg_size(&d->sg[i]);
+
+	return size * es_bytes[d->es];
+}
+
+static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
+{
+	unsigned i;
+	size_t size, es_size = es_bytes[d->es];
+
+	for (size = i = 0; i < d->sglen; i++) {
+		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
+
+		if (size)
+			size += this_size;
+		else if (addr >= d->sg[i].addr &&
+			 addr < d->sg[i].addr + this_size)
+			size += d->sg[i].addr + this_size - addr;
+	}
+	return size;
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
+	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
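+		/*
+		 * The descriptor is live on the hardware: derive the residue
+		 * from the current DMA source/destination position.
+		 */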
+		struct omap_desc *d = c->desc;
+		dma_addr_t pos;
+
+		if (d->dir == DMA_MEM_TO_DEV)
+			pos = omap_get_dma_src_pos(c->dma_ch);
+		else if (d->dir == DMA_DEV_TO_MEM)
+			pos = omap_get_dma_dst_pos(c->dma_ch);
+		else
+			pos = 0;
+
+		txstate->residue = omap_dma_desc_size_pos(d, pos);
+	} else {
+		txstate->residue = 0;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return ret;
+}
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc) && !c->desc) {
+		/*
+		 * c->cyclic is used only by audio and in this case the DMA need
+		 * to be started without delay.
+		 */
+		if (!c->cyclic) {
+			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+			spin_lock(&d->lock);
+			if (list_empty(&c->node))
+				list_add_tail(&c->node, &d->pending);
+			spin_unlock(&d->lock);
+			tasklet_schedule(&d->task);
+		} else {
+			omap_dma_start_desc(c);
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct scatterlist *sgent;
+	struct omap_desc *d;
+	dma_addr_t dev_addr;
+	unsigned i, j = 0, es, en, frame_bytes, sync_type;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		burst = c->cfg.src_maxburst;
+		sync_type = OMAP_DMA_SRC_SYNC;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		burst = c->cfg.dst_maxburst;
+		sync_type = OMAP_DMA_DST_SYNC;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = OMAP_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S32;
+		break;
+	default: /* not reached */
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+	d->sync_mode = OMAP_DMA_SYNC_FRAME;
+	d->sync_type = sync_type;
+	d->periph_port = OMAP_DMA_PORT_TIPB;
+
+	/*
+	 * Build our scatterlist entries: each contains the address,
+	 * the number of elements (EN) in each frame, and the number of
+	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
+	 *
+	 * Burst size translates to number of elements with frame sync.
+	 * Note: DMA engine defines burst to be the number of dev-width
+	 * transfers.
+	 */
+	en = burst;
+	frame_bytes = es_bytes[es] * en;
+	for_each_sg(sgl, sgent, sglen, i) {
+		d->sg[j].addr = sg_dma_address(sgent);
+		d->sg[j].en = en;
+		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
+		j++;
+	}
+
+	d->sglen = j;
+
+	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
+	void *context)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct omap_desc *d;
+	dma_addr_t dev_addr;
+	unsigned es, sync_type;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		burst = c->cfg.src_maxburst;
+		sync_type = OMAP_DMA_SRC_SYNC;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		burst = c->cfg.dst_maxburst;
+		sync_type = OMAP_DMA_DST_SYNC;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = OMAP_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S32;
+		break;
+	default: /* not reached */
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dir = dir;
+	d->dev_addr = dev_addr;
+	d->fi = burst;
+	d->es = es;
+	if (burst)
+		d->sync_mode = OMAP_DMA_SYNC_PACKET;
+	else
+		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
+	d->sync_type = sync_type;
+	d->periph_port = OMAP_DMA_PORT_MPUI;
+	d->sg[0].addr = buf_addr;
+	d->sg[0].en = period_len / es_bytes[es];
+	d->sg[0].fn = buf_len / period_len;
+	d->sglen = 1;
+
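+	/*
+	 * First cyclic transfer on this channel: link the channel onto
+	 * itself so the hardware restarts automatically, and take per-frame
+	 * rather than per-block interrupts.
+	 */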
+	if (!c->cyclic) {
+		c->cyclic = true;
+		omap_dma_link_lch(c->dma_ch, c->dma_ch);
+
+		if (flags & DMA_PREP_INTERRUPT)
+			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+
+		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+	}
+
+	if (dma_omap2plus()) {
+		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+{
+	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+	return 0;
+}
+
+static int omap_dma_terminate_all(struct omap_chan *c)
+{
+	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after omap_stop_dma() returns (even if it does, it will see
+	 * c->desc is NULL and exit.)
+	 */
+	if (c->desc) {
+		c->desc = NULL;
+		/* Avoid stopping the dma twice */
+		if (!c->paused)
+			omap_stop_dma(c->dma_ch);
+	}
+
+	if (c->cyclic) {
+		c->cyclic = false;
+		c->paused = false;
+		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+	}
+
+	vchan_get_all_descriptors(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int omap_dma_pause(struct omap_chan *c)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!c->cyclic)
+		return -EINVAL;
+
+	if (!c->paused) {
+		omap_stop_dma(c->dma_ch);
+		c->paused = true;
+	}
+
+	return 0;
+}
+
+static int omap_dma_resume(struct omap_chan *c)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!c->cyclic)
+		return -EINVAL;
+
+	if (c->paused) {
+		omap_start_dma(c->dma_ch);
+		c->paused = false;
+	}
+
+	return 0;
+}
+
+static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	int ret;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+		break;
+
+	case DMA_TERMINATE_ALL:
+		ret = omap_dma_terminate_all(c);
+		break;
+
+	case DMA_PAUSE:
+		ret = omap_dma_pause(c);
+		break;
+
+	case DMA_RESUME:
+		ret = omap_dma_resume(c);
+		break;
+
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
+{
+	struct omap_chan *c;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	c->dma_sig = dma_sig;
+	c->vc.desc_free = omap_dma_desc_free;
+	vchan_init(&c->vc, &od->ddev);
+	INIT_LIST_HEAD(&c->node);
+
+	od->ddev.chancnt++;
+
+	return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+	tasklet_kill(&od->task);
+	while (!list_empty(&od->ddev.channels)) {
+		struct omap_chan *c = list_first_entry(&od->ddev.channels,
+			struct omap_chan, vc.chan.device_node);
+
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+		kfree(c);
+	}
+	kfree(od);
+}
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+	struct omap_dmadev *od;
+	int rc, i;
+
+	od = kzalloc(sizeof(*od), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+	od->ddev.device_tx_status = omap_dma_tx_status;
+	od->ddev.device_issue_pending = omap_dma_issue_pending;
+	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
+	od->ddev.device_control = omap_dma_control;
+	od->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&od->ddev.channels);
+	INIT_LIST_HEAD(&od->pending);
+	spin_lock_init(&od->lock);
+
+	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
+
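+	/*
+	 * Create one virtual channel per DMA request line; the real
+	 * hardware channel is only claimed when a client allocates it.
+	 */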
+	for (i = 0; i < 127; i++) {
+		rc = omap_dma_chan_init(od, i);
+		if (rc) {
+			omap_dma_free(od);
+			return rc;
+		}
+	}
+
+	rc = dma_async_device_register(&od->ddev);
+	if (rc) {
+		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+			rc);
+		omap_dma_free(od);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, od);
+
+	if (pdev->dev.of_node) {
+		omap_dma_info.dma_cap = od->ddev.cap_mask;
+
+		/* Device-tree DMA controller registration */
+		rc = of_dma_controller_register(pdev->dev.of_node,
+				of_dma_simple_xlate, &omap_dma_info);
+		if (rc) {
+			pr_warn("OMAP-DMA: failed to register DMA controller\n");
+			dma_async_device_unregister(&od->ddev);
+			omap_dma_free(od);
+		}
+	}
+
+	dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+
+	return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+	struct omap_dmadev *od = platform_get_drvdata(pdev);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	dma_async_device_unregister(&od->ddev);
+	omap_dma_free(od);
+
+	return 0;
+}
+
+static const struct of_device_id omap_dma_match[] = {
+	{ .compatible = "ti,omap2420-sdma", },
+	{ .compatible = "ti,omap2430-sdma", },
+	{ .compatible = "ti,omap3430-sdma", },
+	{ .compatible = "ti,omap3630-sdma", },
+	{ .compatible = "ti,omap4430-sdma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_dma_match);
+
+static struct platform_driver omap_dma_driver = {
+	.probe	= omap_dma_probe,
+	.remove	= omap_dma_remove,
+	.driver = {
+		.name = "omap-dma-engine",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(omap_dma_match),
+	},
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &omap_dma_driver.driver) {
+		struct omap_chan *c = to_omap_dma_chan(chan);
+		unsigned req = *(unsigned *)param;
+
+		return req == c->dma_sig;
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
+
+static int omap_dma_init(void)
+{
+	return platform_driver_register(&omap_dma_driver);
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+	platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/dma/pl330.c linux-3.0.101.xm510/drivers/dma/pl330.c
--- linux-3.0.101/drivers/dma/pl330.c	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/pl330.c	2016-05-17 09:52:17.000000000 +0300
@@ -123,7 +123,7 @@
 {
 	struct dma_pl330_dmac *pdmac;
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
 	if (list_empty(list))
@@ -253,25 +253,50 @@
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_peri *peri = pch->chan.private;
 	struct dma_pl330_desc *desc;
+	struct dma_slave_config *slave_config;
 	unsigned long flags;
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
 
-	spin_lock_irqsave(&pch->lock, flags);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
 
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
+		/* Mark all desc done */
+		list_for_each_entry(desc, &pch->work_list, node)
+			desc->status = DONE;
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pch->lock, flags);
 
-	pl330_tasklet((unsigned long) pch);
+		pl330_tasklet((unsigned long) pch);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+
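+		/*
+		 * __ffs() turns the bus width in bytes (1/2/4/8) into the
+		 * log2 burst-size encoding the PL330 microcode expects.
+		 */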
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				peri->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width)
+				peri->burst_sz = __ffs(slave_config->dst_addr_width);
+			//if (slave_config->dst_maxburst)
+				//pch->chan.mcbuf_sz = slave_config->dst_maxburst;
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				peri->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width)
+				peri->burst_sz = __ffs(slave_config->src_addr_width);
+			//if (slave_config->src_maxburst)
+				//pch->chan.mcbuf_sz = slave_config->src_maxburst;
+		}
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+		return -ENXIO;
+	}
 
 	return 0;
 }
@@ -587,6 +612,7 @@
 		peri->rqtype != DEVTOMEM)) {
 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
 			__func__, __LINE__);
+		printk("chan=%d, id=%d\n", (unsigned int)chan, peri->peri_id);
 		return NULL;
 	}
 
@@ -848,6 +874,22 @@
 	.remove		= pl330_remove,
 };
 
+
+
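+/*
+ * Filter callback for dma_request_channel(): matches the channel whose
+ * peripheral id equals the id passed in 'param'.
+ */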
+bool pl330_filter(struct dma_chan *chan, void *param)
+{
+	u8 *peri_id;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_peri *peri = pch->chan.private;
+
+	if (chan->device->dev->driver != &pl330_driver.drv)
+		return false;
+
+	peri_id = &(peri->peri_id);
+	return *peri_id == (unsigned)param;
+}
+EXPORT_SYMBOL(pl330_filter);
+
 static int __init pl330_init(void)
 {
 	return amba_driver_register(&pl330_driver);
diff -urN linux-3.0.101/drivers/dma/sa11x0-dma.c linux-3.0.101.xm510/drivers/dma/sa11x0-dma.c
--- linux-3.0.101/drivers/dma/sa11x0-dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sa11x0-dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,1105 @@
+/*
+ * SA11x0 DMAengine support
+ *
+ * Copyright (C) 2012 Russell King
+ *   Derived in part from arch/arm/mach-sa1100/dma.c,
+ *   Copyright (C) 2000, 2001 by Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sa11x0-dma.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+#define NR_PHY_CHAN	6
+#define DMA_ALIGN	3
+#define DMA_MAX_SIZE	0x1fff
+#define DMA_CHUNK_SIZE	0x1000
+
+#define DMA_DDAR	0x00
+#define DMA_DCSR_S	0x04
+#define DMA_DCSR_C	0x08
+#define DMA_DCSR_R	0x0c
+#define DMA_DBSA	0x10
+#define DMA_DBTA	0x14
+#define DMA_DBSB	0x18
+#define DMA_DBTB	0x1c
+#define DMA_SIZE	0x20
+
+#define DCSR_RUN	(1 << 0)
+#define DCSR_IE		(1 << 1)
+#define DCSR_ERROR	(1 << 2)
+#define DCSR_DONEA	(1 << 3)
+#define DCSR_STRTA	(1 << 4)
+#define DCSR_DONEB	(1 << 5)
+#define DCSR_STRTB	(1 << 6)
+#define DCSR_BIU	(1 << 7)
+
+#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
+#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
+#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
+#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
+#define DDAR_Ser0UDCTr	(0x0 << 4)
+#define DDAR_Ser0UDCRc	(0x1 << 4)
+#define DDAR_Ser1SDLCTr	(0x2 << 4)
+#define DDAR_Ser1SDLCRc	(0x3 << 4)
+#define DDAR_Ser1UARTTr	(0x4 << 4)
+#define DDAR_Ser1UARTRc	(0x5 << 4)
+#define DDAR_Ser2ICPTr	(0x6 << 4)
+#define DDAR_Ser2ICPRc	(0x7 << 4)
+#define DDAR_Ser3UARTTr	(0x8 << 4)
+#define DDAR_Ser3UARTRc	(0x9 << 4)
+#define DDAR_Ser4MCP0Tr	(0xa << 4)
+#define DDAR_Ser4MCP0Rc	(0xb << 4)
+#define DDAR_Ser4MCP1Tr	(0xc << 4)
+#define DDAR_Ser4MCP1Rc	(0xd << 4)
+#define DDAR_Ser4SSPTr	(0xe << 4)
+#define DDAR_Ser4SSPRc	(0xf << 4)
+
+struct sa11x0_dma_sg {
+	u32			addr;
+	u32			len;
+};
+
+struct sa11x0_dma_desc {
+	struct virt_dma_desc	vd;
+
+	u32			ddar;
+	size_t			size;
+	unsigned		period;
+	bool			cyclic;
+
+	unsigned		sglen;
+	struct sa11x0_dma_sg	sg[0];
+};
+
+struct sa11x0_dma_phy;
+
+struct sa11x0_dma_chan {
+	struct virt_dma_chan	vc;
+
+	/* protected by c->vc.lock */
+	struct sa11x0_dma_phy	*phy;
+	enum dma_status		status;
+
+	/* protected by d->lock */
+	struct list_head	node;
+
+	u32			ddar;
+	const char		*name;
+};
+
+struct sa11x0_dma_phy {
+	void __iomem		*base;
+	struct sa11x0_dma_dev	*dev;
+	unsigned		num;
+
+	struct sa11x0_dma_chan	*vchan;
+
+	/* Protected by c->vc.lock */
+	unsigned		sg_load;
+	struct sa11x0_dma_desc	*txd_load;
+	unsigned		sg_done;
+	struct sa11x0_dma_desc	*txd_done;
+#ifdef CONFIG_PM_SLEEP
+	u32			dbs[2];
+	u32			dbt[2];
+	u32			dcsr;
+#endif
+};
+
+struct sa11x0_dma_dev {
+	struct dma_device	slave;
+	void __iomem		*base;
+	spinlock_t		lock;
+	struct tasklet_struct	task;
+	struct list_head	chan_pending;
+	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
+};
+
+static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
+}
+
+static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
+{
+	return container_of(dmadev, struct sa11x0_dma_dev, slave);
+}
+
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
+}
+
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
+}
+
+static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
+{
+	list_del(&txd->vd.node);
+	p->txd_load = txd;
+	p->sg_load = 0;
+
+	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
+		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
+}
+
+static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
+	struct sa11x0_dma_chan *c)
+{
+	struct sa11x0_dma_desc *txd = p->txd_load;
+	struct sa11x0_dma_sg *sg;
+	void __iomem *base = p->base;
+	unsigned dbsx, dbtx;
+	u32 dcsr;
+
+	if (!txd)
+		return;
+
+	dcsr = readl_relaxed(base + DMA_DCSR_R);
+
+	/* Don't try to load the next transfer if both buffers are started */
+	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
+		return;
+
+ if (p->sg_load == txd->sglen) {
|
||
+ if (!txd->cyclic) {
|
||
+ struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
|
||
+
|
||
+ /*
|
||
+ * We have reached the end of the current descriptor.
|
||
+ * Peek at the next descriptor, and if compatible with
|
||
+ * the current, start processing it.
|
||
+ */
|
||
+ if (txn && txn->ddar == txd->ddar) {
|
||
+ txd = txn;
|
||
+ sa11x0_dma_start_desc(p, txn);
|
||
+ } else {
|
||
+ p->txd_load = NULL;
|
||
+ return;
|
||
+ }
|
||
+ } else {
|
||
+ /* Cyclic: reset back to beginning */
|
||
+ p->sg_load = 0;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ sg = &txd->sg[p->sg_load++];
|
||
+
|
||
+ /* Select buffer to load according to channel status */
|
||
+ if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
|
||
+ ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
|
||
+ dbsx = DMA_DBSA;
|
||
+ dbtx = DMA_DBTA;
|
||
+ dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
|
||
+ } else {
|
||
+ dbsx = DMA_DBSB;
|
||
+ dbtx = DMA_DBTB;
|
||
+ dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
|
||
+ }
|
||
+
|
||
+ writel_relaxed(sg->addr, base + dbsx);
|
||
+ writel_relaxed(sg->len, base + dbtx);
|
||
+ writel(dcsr, base + DMA_DCSR_S);
|
||
+
|
||
+ dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
|
||
+ p->num, dcsr,
|
||
+ 'A' + (dbsx == DMA_DBSB), sg->addr,
|
||
+ 'A' + (dbtx == DMA_DBTB), sg->len);
|
||
+}
|
||
+
|
||
+static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
|
||
+ struct sa11x0_dma_chan *c)
|
||
+{
|
||
+ struct sa11x0_dma_desc *txd = p->txd_done;
|
||
+
|
||
+ if (++p->sg_done == txd->sglen) {
|
||
+ if (!txd->cyclic) {
|
||
+ vchan_cookie_complete(&txd->vd);
|
||
+
|
||
+ p->sg_done = 0;
|
||
+ p->txd_done = p->txd_load;
|
||
+
|
||
+ if (!p->txd_done)
|
||
+ tasklet_schedule(&p->dev->task);
|
||
+ } else {
|
||
+ if ((p->sg_done % txd->period) == 0)
|
||
+ vchan_cyclic_callback(&txd->vd);
|
||
+
|
||
+ /* Cyclic: reset back to beginning */
|
||
+ p->sg_done = 0;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ sa11x0_dma_start_sg(p, c);
|
||
+}
|
||
+
|
||
+static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
|
||
+{
|
||
+ struct sa11x0_dma_phy *p = dev_id;
|
||
+ struct sa11x0_dma_dev *d = p->dev;
|
||
+ struct sa11x0_dma_chan *c;
|
||
+ u32 dcsr;
|
||
+
|
||
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
|
||
+ if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
|
||
+ return IRQ_NONE;
|
||
+
|
||
+ /* Clear reported status bits */
|
||
+ writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
|
||
+ p->base + DMA_DCSR_C);
|
||
+
|
||
+ dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
|
||
+
|
||
+ if (dcsr & DCSR_ERROR) {
|
||
+ dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
|
||
+ p->num, dcsr,
|
||
+ readl_relaxed(p->base + DMA_DDAR),
|
||
+ readl_relaxed(p->base + DMA_DBSA),
|
||
+ readl_relaxed(p->base + DMA_DBTA),
|
||
+ readl_relaxed(p->base + DMA_DBSB),
|
||
+ readl_relaxed(p->base + DMA_DBTB));
|
||
+ }
|
||
+
|
||
+ c = p->vchan;
|
||
+ if (c) {
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ /*
|
||
+ * Now that we're holding the lock, check that the vchan
|
||
+ * really is associated with this pchan before touching the
|
||
+ * hardware. This should always succeed, because we won't
|
||
+ * change p->vchan or c->phy while the channel is actively
|
||
+ * transferring.
|
||
+ */
|
||
+ if (c->phy == p) {
|
||
+ if (dcsr & DCSR_DONEA)
|
||
+ sa11x0_dma_complete(p, c);
|
||
+ if (dcsr & DCSR_DONEB)
|
||
+ sa11x0_dma_complete(p, c);
|
||
+ }
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+ }
|
||
+
|
||
+ return IRQ_HANDLED;
|
||
+}
|
||
+
|
||
+static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
|
||
+{
|
||
+ struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
|
||
+
|
||
+ /* If the issued list is empty, we have no further txds to process */
|
||
+ if (txd) {
|
||
+ struct sa11x0_dma_phy *p = c->phy;
|
||
+
|
||
+ sa11x0_dma_start_desc(p, txd);
|
||
+ p->txd_done = txd;
|
||
+ p->sg_done = 0;
|
||
+
|
||
+ /* The channel should not have any transfers started */
|
||
+ WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
|
||
+ (DCSR_STRTA | DCSR_STRTB));
|
||
+
|
||
+ /* Clear the run and start bits before changing DDAR */
|
||
+ writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
|
||
+ p->base + DMA_DCSR_C);
|
||
+ writel_relaxed(txd->ddar, p->base + DMA_DDAR);
|
||
+
|
||
+ /* Try to start both buffers */
|
||
+ sa11x0_dma_start_sg(p, c);
|
||
+ sa11x0_dma_start_sg(p, c);
|
||
+ }
|
||
+}
|
||
+
|
||
+static void sa11x0_dma_tasklet(unsigned long arg)
|
||
+{
|
||
+ struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
|
||
+ struct sa11x0_dma_phy *p;
|
||
+ struct sa11x0_dma_chan *c;
|
||
+ unsigned pch, pch_alloc = 0;
|
||
+
|
||
+ dev_dbg(d->slave.dev, "tasklet enter\n");
|
||
+
|
||
+ list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
|
||
+ spin_lock_irq(&c->vc.lock);
|
||
+ p = c->phy;
|
||
+ if (p && !p->txd_done) {
|
||
+ sa11x0_dma_start_txd(c);
|
||
+ if (!p->txd_done) {
|
||
+ /* No current txd associated with this channel */
|
||
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
|
||
+
|
||
+ /* Mark this channel free */
|
||
+ c->phy = NULL;
|
||
+ p->vchan = NULL;
|
||
+ }
|
||
+ }
|
||
+ spin_unlock_irq(&c->vc.lock);
|
||
+ }
|
||
+
|
||
+ spin_lock_irq(&d->lock);
|
||
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
|
||
+ p = &d->phy[pch];
|
||
+
|
||
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
|
||
+ c = list_first_entry(&d->chan_pending,
|
||
+ struct sa11x0_dma_chan, node);
|
||
+ list_del_init(&c->node);
|
||
+
|
||
+ pch_alloc |= 1 << pch;
|
||
+
|
||
+ /* Mark this channel allocated */
|
||
+ p->vchan = c;
|
||
+
|
||
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
|
||
+ }
|
||
+ }
|
||
+ spin_unlock_irq(&d->lock);
|
||
+
|
||
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
|
||
+ if (pch_alloc & (1 << pch)) {
|
||
+ p = &d->phy[pch];
|
||
+ c = p->vchan;
|
||
+
|
||
+ spin_lock_irq(&c->vc.lock);
|
||
+ c->phy = p;
|
||
+
|
||
+ sa11x0_dma_start_txd(c);
|
||
+ spin_unlock_irq(&c->vc.lock);
|
||
+ }
|
||
+ }
|
||
+
|
||
+ dev_dbg(d->slave.dev, "tasklet exit\n");
|
||
+}
|
||
+
|
||
+
|
||
+static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&d->lock, flags);
|
||
+ list_del_init(&c->node);
|
||
+ spin_unlock_irqrestore(&d->lock, flags);
|
||
+
|
||
+ vchan_free_chan_resources(&c->vc);
|
||
+}
|
||
+
|
||
+static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
|
||
+{
|
||
+ unsigned reg;
|
||
+ u32 dcsr;
|
||
+
|
||
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
|
||
+
|
||
+ if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
|
||
+ (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
|
||
+ reg = DMA_DBSA;
|
||
+ else
|
||
+ reg = DMA_DBSB;
|
||
+
|
||
+ return readl_relaxed(p->base + reg);
|
||
+}
|
||
+
|
||
+static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
|
||
+ dma_cookie_t cookie, struct dma_tx_state *state)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||
+ struct sa11x0_dma_phy *p;
|
||
+ struct virt_dma_desc *vd;
|
||
+ unsigned long flags;
|
||
+ enum dma_status ret;
|
||
+
|
||
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
|
||
+ if (ret == DMA_SUCCESS)
|
||
+ return ret;
|
||
+
|
||
+ if (!state)
|
||
+ return c->status;
|
||
+
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ p = c->phy;
|
||
+
|
||
+ /*
|
||
+ * If the cookie is on our issue queue, then the residue is
|
||
+ * its total size.
|
||
+ */
|
||
+ vd = vchan_find_desc(&c->vc, cookie);
|
||
+ if (vd) {
|
||
+ state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
|
||
+ } else if (!p) {
|
||
+ state->residue = 0;
|
||
+ } else {
|
||
+ struct sa11x0_dma_desc *txd;
|
||
+ size_t bytes = 0;
|
||
+
|
||
+ if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
|
||
+ txd = p->txd_done;
|
||
+ else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
|
||
+ txd = p->txd_load;
|
||
+ else
|
||
+ txd = NULL;
|
||
+
|
||
+ ret = c->status;
|
||
+ if (txd) {
|
||
+ dma_addr_t addr = sa11x0_dma_pos(p);
|
||
+ unsigned i;
|
||
+
|
||
+ dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
|
||
+
|
||
+ for (i = 0; i < txd->sglen; i++) {
|
||
+ dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
|
||
+ i, txd->sg[i].addr, txd->sg[i].len);
|
||
+ if (addr >= txd->sg[i].addr &&
|
||
+ addr < txd->sg[i].addr + txd->sg[i].len) {
|
||
+ unsigned len;
|
||
+
|
||
+ len = txd->sg[i].len -
|
||
+ (addr - txd->sg[i].addr);
|
||
+ dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
|
||
+ i, len);
|
||
+ bytes += len;
|
||
+ i++;
|
||
+ break;
|
||
+ }
|
||
+ }
|
||
+ for (; i < txd->sglen; i++) {
|
||
+ dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
|
||
+ i, txd->sg[i].addr, txd->sg[i].len);
|
||
+ bytes += txd->sg[i].len;
|
||
+ }
|
||
+ }
|
||
+ state->residue = bytes;
|
||
+ }
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+
|
||
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * Move pending txds to the issued list, and re-init pending list.
|
||
+ * If not already pending, add this channel to the list of pending
|
||
+ * channels and trigger the tasklet to run.
|
||
+ */
|
||
+static void sa11x0_dma_issue_pending(struct dma_chan *chan)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ if (vchan_issue_pending(&c->vc)) {
|
||
+ if (!c->phy) {
|
||
+ spin_lock(&d->lock);
|
||
+ if (list_empty(&c->node)) {
|
||
+ list_add_tail(&c->node, &d->chan_pending);
|
||
+ tasklet_schedule(&d->task);
|
||
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
|
||
+ }
|
||
+ spin_unlock(&d->lock);
|
||
+ }
|
||
+ } else
|
||
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+}
|
||
+
|
||
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
|
||
+ struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
|
||
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_desc *txd;
|
||
+ struct scatterlist *sgent;
|
||
+ unsigned i, j = sglen;
|
||
+ size_t size = 0;
|
||
+
|
||
+ /* SA11x0 channels can only operate in their native direction */
|
||
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
|
||
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
|
||
+ &c->vc, c->ddar, dir);
|
||
+ return NULL;
|
||
+ }
|
||
+
|
||
+ /* Do not allow zero-sized txds */
|
||
+ if (sglen == 0)
|
||
+ return NULL;
|
||
+
|
||
+ for_each_sg(sg, sgent, sglen, i) {
|
||
+ dma_addr_t addr = sg_dma_address(sgent);
|
||
+ unsigned int len = sg_dma_len(sgent);
|
||
+
|
||
+ if (len > DMA_MAX_SIZE)
|
||
+ j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
|
||
+ if (addr & DMA_ALIGN) {
|
||
+ dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
|
||
+ &c->vc, addr);
|
||
+ return NULL;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
|
||
+ if (!txd) {
|
||
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
|
||
+ return NULL;
|
||
+ }
|
||
+
|
||
+ j = 0;
|
||
+ for_each_sg(sg, sgent, sglen, i) {
|
||
+ dma_addr_t addr = sg_dma_address(sgent);
|
||
+ unsigned len = sg_dma_len(sgent);
|
||
+
|
||
+ size += len;
|
||
+
|
||
+ do {
|
||
+ unsigned tlen = len;
|
||
+
|
||
+ /*
|
||
+ * Check whether the transfer will fit. If not, try
|
||
+ * to split the transfer up such that we end up with
|
||
+ * equal chunks - but make sure that we preserve the
|
||
+ * alignment. This avoids small segments.
|
||
+ */
|
||
+ if (tlen > DMA_MAX_SIZE) {
|
||
+ unsigned mult = DIV_ROUND_UP(tlen,
|
||
+ DMA_MAX_SIZE & ~DMA_ALIGN);
|
||
+
|
||
+ tlen = (tlen / mult) & ~DMA_ALIGN;
|
||
+ }
|
||
+
|
||
+ txd->sg[j].addr = addr;
|
||
+ txd->sg[j].len = tlen;
|
||
+
|
||
+ addr += tlen;
|
||
+ len -= tlen;
|
||
+ j++;
|
||
+ } while (len);
|
||
+ }
|
||
+
|
||
+ txd->ddar = c->ddar;
|
||
+ txd->size = size;
|
||
+ txd->sglen = j;
|
||
+
|
||
+ dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
|
||
+ &c->vc, &txd->vd, txd->size, txd->sglen);
|
||
+
|
||
+ return vchan_tx_prep(&c->vc, &txd->vd, flags);
|
||
+}
|
||
+
|
||
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
|
||
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
|
||
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_desc *txd;
|
||
+ unsigned i, j, k, sglen, sgperiod;
|
||
+
|
||
+ /* SA11x0 channels can only operate in their native direction */
|
||
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
|
||
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
|
||
+ &c->vc, c->ddar, dir);
|
||
+ return NULL;
|
||
+ }
|
||
+
|
||
+ sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
|
||
+ sglen = size * sgperiod / period;
|
||
+
|
||
+ /* Do not allow zero-sized txds */
|
||
+ if (sglen == 0)
|
||
+ return NULL;
|
||
+
|
||
+ txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
|
||
+ if (!txd) {
|
||
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
|
||
+ return NULL;
|
||
+ }
|
||
+
|
||
+ for (i = k = 0; i < size / period; i++) {
|
||
+ size_t tlen, len = period;
|
||
+
|
||
+ for (j = 0; j < sgperiod; j++, k++) {
|
||
+ tlen = len;
|
||
+
|
||
+ if (tlen > DMA_MAX_SIZE) {
|
||
+ unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
|
||
+ tlen = (tlen / mult) & ~DMA_ALIGN;
|
||
+ }
|
||
+
|
||
+ txd->sg[k].addr = addr;
|
||
+ txd->sg[k].len = tlen;
|
||
+ addr += tlen;
|
||
+ len -= tlen;
|
||
+ }
|
||
+
|
||
+ WARN_ON(len != 0);
|
||
+ }
|
||
+
|
||
+ WARN_ON(k != sglen);
|
||
+
|
||
+ txd->ddar = c->ddar;
|
||
+ txd->size = size;
|
||
+ txd->sglen = sglen;
|
||
+ txd->cyclic = 1;
|
||
+ txd->period = sgperiod;
|
||
+
|
||
+ return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
|
||
+{
|
||
+ u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
|
||
+ dma_addr_t addr;
|
||
+ enum dma_slave_buswidth width;
|
||
+ u32 maxburst;
|
||
+
|
||
+ if (ddar & DDAR_RW) {
|
||
+ addr = cfg->src_addr;
|
||
+ width = cfg->src_addr_width;
|
||
+ maxburst = cfg->src_maxburst;
|
||
+ } else {
|
||
+ addr = cfg->dst_addr;
|
||
+ width = cfg->dst_addr_width;
|
||
+ maxburst = cfg->dst_maxburst;
|
||
+ }
|
||
+
|
||
+ if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
|
||
+ width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
|
||
+ (maxburst != 4 && maxburst != 8))
|
||
+ return -EINVAL;
|
||
+
|
||
+ if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
|
||
+ ddar |= DDAR_DW;
|
||
+ if (maxburst == 8)
|
||
+ ddar |= DDAR_BS;
|
||
+
|
||
+ dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
|
||
+ &c->vc, addr, width, maxburst);
|
||
+
|
||
+ c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
||
+ unsigned long arg)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||
+ struct sa11x0_dma_phy *p;
|
||
+ LIST_HEAD(head);
|
||
+ unsigned long flags;
|
||
+ int ret;
|
||
+
|
||
+ switch (cmd) {
|
||
+ case DMA_SLAVE_CONFIG:
|
||
+ return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
|
||
+
|
||
+ case DMA_TERMINATE_ALL:
|
||
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
|
||
+ /* Clear the tx descriptor lists */
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ vchan_get_all_descriptors(&c->vc, &head);
|
||
+
|
||
+ p = c->phy;
|
||
+ if (p) {
|
||
+ dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
|
||
+ /* vchan is assigned to a pchan - stop the channel */
|
||
+ writel(DCSR_RUN | DCSR_IE |
|
||
+ DCSR_STRTA | DCSR_DONEA |
|
||
+ DCSR_STRTB | DCSR_DONEB,
|
||
+ p->base + DMA_DCSR_C);
|
||
+
|
||
+ if (p->txd_load) {
|
||
+ if (p->txd_load != p->txd_done)
|
||
+ list_add_tail(&p->txd_load->vd.node, &head);
|
||
+ p->txd_load = NULL;
|
||
+ }
|
||
+ if (p->txd_done) {
|
||
+ list_add_tail(&p->txd_done->vd.node, &head);
|
||
+ p->txd_done = NULL;
|
||
+ }
|
||
+ c->phy = NULL;
|
||
+ spin_lock(&d->lock);
|
||
+ p->vchan = NULL;
|
||
+ spin_unlock(&d->lock);
|
||
+ tasklet_schedule(&d->task);
|
||
+ }
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+ vchan_dma_desc_free_list(&c->vc, &head);
|
||
+ ret = 0;
|
||
+ break;
|
||
+
|
||
+ case DMA_PAUSE:
|
||
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ if (c->status == DMA_IN_PROGRESS) {
|
||
+ c->status = DMA_PAUSED;
|
||
+
|
||
+ p = c->phy;
|
||
+ if (p) {
|
||
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
|
||
+ } else {
|
||
+ spin_lock(&d->lock);
|
||
+ list_del_init(&c->node);
|
||
+ spin_unlock(&d->lock);
|
||
+ }
|
||
+ }
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+ ret = 0;
|
||
+ break;
|
||
+
|
||
+ case DMA_RESUME:
|
||
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
|
||
+ spin_lock_irqsave(&c->vc.lock, flags);
|
||
+ if (c->status == DMA_PAUSED) {
|
||
+ c->status = DMA_IN_PROGRESS;
|
||
+
|
||
+ p = c->phy;
|
||
+ if (p) {
|
||
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
|
||
+ } else if (!list_empty(&c->vc.desc_issued)) {
|
||
+ spin_lock(&d->lock);
|
||
+ list_add_tail(&c->node, &d->chan_pending);
|
||
+ spin_unlock(&d->lock);
|
||
+ }
|
||
+ }
|
||
+ spin_unlock_irqrestore(&c->vc.lock, flags);
|
||
+ ret = 0;
|
||
+ break;
|
||
+
|
||
+ default:
|
||
+ ret = -ENXIO;
|
||
+ break;
|
||
+ }
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+struct sa11x0_dma_channel_desc {
|
||
+ u32 ddar;
|
||
+ const char *name;
|
||
+};
|
||
+
|
||
+#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
|
||
+static const struct sa11x0_dma_channel_desc chan_desc[] = {
|
||
+ CD(Ser0UDCTr, 0),
|
||
+ CD(Ser0UDCRc, DDAR_RW),
|
||
+ CD(Ser1SDLCTr, 0),
|
||
+ CD(Ser1SDLCRc, DDAR_RW),
|
||
+ CD(Ser1UARTTr, 0),
|
||
+ CD(Ser1UARTRc, DDAR_RW),
|
||
+ CD(Ser2ICPTr, 0),
|
||
+ CD(Ser2ICPRc, DDAR_RW),
|
||
+ CD(Ser3UARTTr, 0),
|
||
+ CD(Ser3UARTRc, DDAR_RW),
|
||
+ CD(Ser4MCP0Tr, 0),
|
||
+ CD(Ser4MCP0Rc, DDAR_RW),
|
||
+ CD(Ser4MCP1Tr, 0),
|
||
+ CD(Ser4MCP1Rc, DDAR_RW),
|
||
+ CD(Ser4SSPTr, 0),
|
||
+ CD(Ser4SSPRc, DDAR_RW),
|
||
+};
|
||
+
|
||
+static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
|
||
+ struct device *dev)
|
||
+{
|
||
+ unsigned i;
|
||
+
|
||
+ dmadev->chancnt = ARRAY_SIZE(chan_desc);
|
||
+ INIT_LIST_HEAD(&dmadev->channels);
|
||
+ dmadev->dev = dev;
|
||
+ dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
|
||
+ dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
|
||
+ dmadev->device_control = sa11x0_dma_control;
|
||
+ dmadev->device_tx_status = sa11x0_dma_tx_status;
|
||
+ dmadev->device_issue_pending = sa11x0_dma_issue_pending;
|
||
+
|
||
+ for (i = 0; i < dmadev->chancnt; i++) {
|
||
+ struct sa11x0_dma_chan *c;
|
||
+
|
||
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||
+ if (!c) {
|
||
+ dev_err(dev, "no memory for channel %u\n", i);
|
||
+ return -ENOMEM;
|
||
+ }
|
||
+
|
||
+ c->status = DMA_IN_PROGRESS;
|
||
+ c->ddar = chan_desc[i].ddar;
|
||
+ c->name = chan_desc[i].name;
|
||
+ INIT_LIST_HEAD(&c->node);
|
||
+
|
||
+ c->vc.desc_free = sa11x0_dma_free_desc;
|
||
+ vchan_init(&c->vc, dmadev);
|
||
+ }
|
||
+
|
||
+ return dma_async_device_register(dmadev);
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
|
||
+ void *data)
|
||
+{
|
||
+ int irq = platform_get_irq(pdev, nr);
|
||
+
|
||
+ if (irq <= 0)
|
||
+ return -ENXIO;
|
||
+
|
||
+ return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
|
||
+}
|
||
+
|
||
+static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
|
||
+ void *data)
|
||
+{
|
||
+ int irq = platform_get_irq(pdev, nr);
|
||
+ if (irq > 0)
|
||
+ free_irq(irq, data);
|
||
+}
|
||
+
|
||
+static void sa11x0_dma_free_channels(struct dma_device *dmadev)
|
||
+{
|
||
+ struct sa11x0_dma_chan *c, *cn;
|
||
+
|
||
+ list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
|
||
+ list_del(&c->vc.chan.device_node);
|
||
+ tasklet_kill(&c->vc.task);
|
||
+ kfree(c);
|
||
+ }
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_probe(struct platform_device *pdev)
|
||
+{
|
||
+ struct sa11x0_dma_dev *d;
|
||
+ struct resource *res;
|
||
+ unsigned i;
|
||
+ int ret;
|
||
+
|
||
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||
+ if (!res)
|
||
+ return -ENXIO;
|
||
+
|
||
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
|
||
+ if (!d) {
|
||
+ ret = -ENOMEM;
|
||
+ goto err_alloc;
|
||
+ }
|
||
+
|
||
+ spin_lock_init(&d->lock);
|
||
+ INIT_LIST_HEAD(&d->chan_pending);
|
||
+
|
||
+ d->base = ioremap(res->start, resource_size(res));
|
||
+ if (!d->base) {
|
||
+ ret = -ENOMEM;
|
||
+ goto err_ioremap;
|
||
+ }
|
||
+
|
||
+ tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
|
||
+
|
||
+ for (i = 0; i < NR_PHY_CHAN; i++) {
|
||
+ struct sa11x0_dma_phy *p = &d->phy[i];
|
||
+
|
||
+ p->dev = d;
|
||
+ p->num = i;
|
||
+ p->base = d->base + i * DMA_SIZE;
|
||
+ writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
|
||
+ DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
|
||
+ p->base + DMA_DCSR_C);
|
||
+ writel_relaxed(0, p->base + DMA_DDAR);
|
||
+
|
||
+ ret = sa11x0_dma_request_irq(pdev, i, p);
|
||
+ if (ret) {
|
||
+ while (i) {
|
||
+ i--;
|
||
+ sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
|
||
+ }
|
||
+ goto err_irq;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
|
||
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
|
||
+ d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
|
||
+ d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
|
||
+ ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
|
||
+ if (ret) {
|
||
+ dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
|
||
+ ret);
|
||
+ goto err_slave_reg;
|
||
+ }
|
||
+
|
||
+ platform_set_drvdata(pdev, d);
|
||
+ return 0;
|
||
+
|
||
+ err_slave_reg:
|
||
+ sa11x0_dma_free_channels(&d->slave);
|
||
+ for (i = 0; i < NR_PHY_CHAN; i++)
|
||
+ sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
|
||
+ err_irq:
|
||
+ tasklet_kill(&d->task);
|
||
+ iounmap(d->base);
|
||
+ err_ioremap:
|
||
+ kfree(d);
|
||
+ err_alloc:
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_remove(struct platform_device *pdev)
|
||
+{
|
||
+ struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
|
||
+ unsigned pch;
|
||
+
|
||
+ dma_async_device_unregister(&d->slave);
|
||
+
|
||
+ sa11x0_dma_free_channels(&d->slave);
|
||
+ for (pch = 0; pch < NR_PHY_CHAN; pch++)
|
||
+ sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
|
||
+ tasklet_kill(&d->task);
|
||
+ iounmap(d->base);
|
||
+ kfree(d);
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+#ifdef CONFIG_PM_SLEEP
|
||
+static int sa11x0_dma_suspend(struct device *dev)
|
||
+{
|
||
+ struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
||
+ unsigned pch;
|
||
+
|
||
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
|
||
+ struct sa11x0_dma_phy *p = &d->phy[pch];
|
||
+ u32 dcsr, saved_dcsr;
|
||
+
|
||
+ dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
|
||
+ if (dcsr & DCSR_RUN) {
|
||
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
|
||
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
|
||
+ }
|
||
+
|
||
+ saved_dcsr &= DCSR_RUN | DCSR_IE;
|
||
+ if (dcsr & DCSR_BIU) {
|
||
+ p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
|
||
+ p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
|
||
+ p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
|
||
+ p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
|
||
+ saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
|
||
+ (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
|
||
+ } else {
|
||
+ p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
|
||
+ p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
|
||
+ p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
|
||
+ p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
|
||
+ saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
|
||
+ }
|
||
+ p->dcsr = saved_dcsr;
|
||
+
|
||
+ writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
|
||
+ }
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int sa11x0_dma_resume(struct device *dev)
|
||
+{
|
||
+ struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
||
+ unsigned pch;
|
||
+
|
||
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
|
||
+ struct sa11x0_dma_phy *p = &d->phy[pch];
|
||
+ struct sa11x0_dma_desc *txd = NULL;
|
||
+ u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
|
||
+
|
||
+ WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
|
||
+
|
||
+ if (p->txd_done)
|
||
+ txd = p->txd_done;
|
||
+ else if (p->txd_load)
|
||
+ txd = p->txd_load;
|
||
+
|
||
+ if (!txd)
|
||
+ continue;
|
||
+
|
||
+ writel_relaxed(txd->ddar, p->base + DMA_DDAR);
|
||
+
|
||
+ writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
|
||
+ writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
|
||
+ writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
|
||
+ writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
|
||
+ writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
|
||
+ }
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+#endif
|
||
+
|
||
+static const struct dev_pm_ops sa11x0_dma_pm_ops = {
|
||
+ .suspend_noirq = sa11x0_dma_suspend,
|
||
+ .resume_noirq = sa11x0_dma_resume,
|
||
+ .freeze_noirq = sa11x0_dma_suspend,
|
||
+ .thaw_noirq = sa11x0_dma_resume,
|
||
+ .poweroff_noirq = sa11x0_dma_suspend,
|
||
+ .restore_noirq = sa11x0_dma_resume,
|
||
+};
|
||
+
|
||
+static struct platform_driver sa11x0_dma_driver = {
|
||
+ .driver = {
|
||
+ .name = "sa11x0-dma",
|
||
+ .owner = THIS_MODULE,
|
||
+ .pm = &sa11x0_dma_pm_ops,
|
||
+ },
|
||
+ .probe = sa11x0_dma_probe,
|
||
+ .remove = sa11x0_dma_remove,
|
||
+};
|
||
+
|
||
+bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
|
||
+{
|
||
+ if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
|
||
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||
+ const char *p = param;
|
||
+
|
||
+ return !strcmp(c->name, p);
|
||
+ }
|
||
+ return false;
|
||
+}
|
||
+EXPORT_SYMBOL(sa11x0_dma_filter_fn);
|
||
+
|
||
+static int __init sa11x0_dma_init(void)
|
||
+{
|
||
+ return platform_driver_register(&sa11x0_dma_driver);
|
||
+}
|
||
+subsys_initcall(sa11x0_dma_init);
|
||
+
|
||
+static void __exit sa11x0_dma_exit(void)
|
||
+{
|
||
+ platform_driver_unregister(&sa11x0_dma_driver);
|
||
+}
|
||
+module_exit(sa11x0_dma_exit);
|
||
+
|
||
+MODULE_AUTHOR("Russell King");
|
||
+MODULE_DESCRIPTION("SA-11x0 DMA driver");
|
||
+MODULE_LICENSE("GPL v2");
|
||
+MODULE_ALIAS("platform:sa11x0-dma");
|
||
diff -urN linux-3.0.101/drivers/dma/sh/Kconfig linux-3.0.101.xm510/drivers/dma/sh/Kconfig
--- linux-3.0.101/drivers/dma/sh/Kconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sh/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,24 @@
+#
+# DMA engine configuration for sh
+#
+
+config SH_DMAE_BASE
+	bool "Renesas SuperH DMA Engine support"
+	depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+	depends on !SH_DMA_API
+	default y
+	select DMA_ENGINE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
+config SH_DMAE
+	tristate "Renesas SuperH DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
+config SUDMAC
+	tristate "Renesas SUDMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SUDMAC controllers.
diff -urN linux-3.0.101/drivers/dma/sh/Makefile linux-3.0.101.xm510/drivers/dma/sh/Makefile
--- linux-3.0.101/drivers/dma/sh/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sh/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff -urN linux-3.0.101/drivers/dma/sh/shdma-base.c linux-3.0.101.xm510/drivers/dma/sh/shdma-base.c
|
||
--- linux-3.0.101/drivers/dma/sh/shdma-base.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/sh/shdma-base.c 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,940 @@
|
||
+/*
|
||
+ * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
|
||
+ *
|
||
+ * extracted from shdma.c
|
||
+ *
|
||
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||
+ *
|
||
+ * This is free software; you can redistribute it and/or modify
|
||
+ * it under the terms of version 2 of the GNU General Public License as
|
||
+ * published by the Free Software Foundation.
|
||
+ */
|
||
+
|
||
+#include <linux/delay.h>
|
||
+#include <linux/shdma-base.h>
|
||
+#include <linux/dmaengine.h>
|
||
+#include <linux/init.h>
|
||
+#include <linux/interrupt.h>
|
||
+#include <linux/module.h>
|
||
+#include <linux/pm_runtime.h>
|
||
+#include <linux/slab.h>
|
||
+#include <linux/spinlock.h>
|
||
+
|
||
+#include "../dmaengine.h"
|
||
+
|
||
+/* DMA descriptor control */
|
||
+enum shdma_desc_status {
|
||
+ DESC_IDLE,
|
||
+ DESC_PREPARED,
|
||
+ DESC_SUBMITTED,
|
||
+ DESC_COMPLETED, /* completed, have to call callback */
|
||
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
|
||
+};
|
||
+
|
||
+#define NR_DESCS_PER_CHANNEL 32
|
||
+
|
||
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
|
||
+#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
|
||
+
|
||
+/*
|
||
+ * For slave DMA we assume, that there is a finite number of DMA slaves in the
|
||
+ * system, and that each such slave can only use a finite number of channels.
|
||
+ * We use slave channel IDs to make sure, that no such slave channel ID is
|
||
+ * allocated more than once.
|
||
+ */
|
||
+static unsigned int slave_num = 256;
|
||
+module_param(slave_num, uint, 0444);
|
||
+
|
||
+/* A bitmask with slave_num bits */
|
||
+static unsigned long *shdma_slave_used;
|
||
+
|
||
+/* Called under spin_lock_irq(&schan->chan_lock") */
|
||
+static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
|
||
+{
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ struct shdma_desc *sdesc;
|
||
+
|
||
+ /* DMA work check */
|
||
+ if (ops->channel_busy(schan))
|
||
+ return;
|
||
+
|
||
+ /* Find the first not transferred descriptor */
|
||
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
|
||
+ if (sdesc->mark == DESC_SUBMITTED) {
|
||
+ ops->start_xfer(schan, sdesc);
|
||
+ break;
|
||
+ }
|
||
+}
|
||
+
|
||
+static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
|
||
+{
|
||
+ struct shdma_desc *chunk, *c, *desc =
|
||
+ container_of(tx, struct shdma_desc, async_tx),
|
||
+ *last = desc;
|
||
+ struct shdma_chan *schan = to_shdma_chan(tx->chan);
|
||
+ dma_async_tx_callback callback = tx->callback;
|
||
+ dma_cookie_t cookie;
|
||
+ bool power_up;
|
||
+
|
||
+ spin_lock_irq(&schan->chan_lock);
|
||
+
|
||
+ power_up = list_empty(&schan->ld_queue);
|
||
+
|
||
+ cookie = dma_cookie_assign(tx);
|
||
+
|
||
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
|
||
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
|
||
+ /*
|
||
+ * All chunks are on the global ld_free, so, we have to find
|
||
+ * the end of the chain ourselves
|
||
+ */
|
||
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
|
||
+ chunk->async_tx.cookie > 0 ||
|
||
+ chunk->async_tx.cookie == -EBUSY ||
|
||
+ &chunk->node == &schan->ld_free))
|
||
+ break;
|
||
+ chunk->mark = DESC_SUBMITTED;
|
||
+ /* Callback goes to the last chunk */
|
||
+ chunk->async_tx.callback = NULL;
|
||
+ chunk->cookie = cookie;
|
||
+ list_move_tail(&chunk->node, &schan->ld_queue);
|
||
+ last = chunk;
|
||
+
|
||
+ dev_dbg(schan->dev, "submit #%d@%p on %d\n",
|
||
+ tx->cookie, &last->async_tx, schan->id);
|
||
+ }
|
||
+
|
||
+ last->async_tx.callback = callback;
|
||
+ last->async_tx.callback_param = tx->callback_param;
|
||
+
|
||
+ if (power_up) {
|
||
+ int ret;
|
||
+ schan->pm_state = SHDMA_PM_BUSY;
|
||
+
|
||
+ ret = pm_runtime_get(schan->dev);
|
||
+
|
||
+ spin_unlock_irq(&schan->chan_lock);
|
||
+ if (ret < 0)
|
||
+ dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
|
||
+
|
||
+ pm_runtime_barrier(schan->dev);
|
||
+
|
||
+ spin_lock_irq(&schan->chan_lock);
|
||
+
|
||
+ /* Have we been reset, while waiting? */
|
||
+ if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
|
||
+ struct shdma_dev *sdev =
|
||
+ to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ dev_dbg(schan->dev, "Bring up channel %d\n",
|
||
+ schan->id);
|
||
+ /*
|
||
+ * TODO: .xfer_setup() might fail on some platforms.
|
||
+ * Make it int then, on error remove chunks from the
|
||
+ * queue again
|
||
+ */
|
||
+ ops->setup_xfer(schan, schan->slave_id);
|
||
+
|
||
+ if (schan->pm_state == SHDMA_PM_PENDING)
|
||
+ shdma_chan_xfer_ld_queue(schan);
|
||
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
|
||
+ }
|
||
+ } else {
|
||
+ /*
|
||
+ * Tell .device_issue_pending() not to run the queue, interrupts
|
||
+ * will do it anyway
|
||
+ */
|
||
+ schan->pm_state = SHDMA_PM_PENDING;
|
||
+ }
|
||
+
|
||
+ spin_unlock_irq(&schan->chan_lock);
|
||
+
|
||
+ return cookie;
|
||
+}
|
||
+
|
||
+/* Called with desc_lock held */
|
||
+static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
|
||
+{
|
||
+ struct shdma_desc *sdesc;
|
||
+
|
||
+ list_for_each_entry(sdesc, &schan->ld_free, node)
|
||
+ if (sdesc->mark != DESC_PREPARED) {
|
||
+ BUG_ON(sdesc->mark != DESC_IDLE);
|
||
+ list_del(&sdesc->node);
|
||
+ return sdesc;
|
||
+ }
|
||
+
|
||
+ return NULL;
|
||
+}
|
||
+
|
||
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
|
||
+{
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ int ret;
|
||
+
|
||
+ if (slave_id < 0 || slave_id >= slave_num)
|
||
+ return -EINVAL;
|
||
+
|
||
+ if (test_and_set_bit(slave_id, shdma_slave_used))
|
||
+ return -EBUSY;
|
||
+
|
||
+ ret = ops->set_slave(schan, slave_id, false);
|
||
+ if (ret < 0) {
|
||
+ clear_bit(slave_id, shdma_slave_used);
|
||
+ return ret;
|
||
+ }
|
||
+
|
||
+ schan->slave_id = slave_id;
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * This is the standard shdma filter function to be used as a replacement to the
|
||
+ * "old" method, using the .private pointer. If for some reason you allocate a
|
||
+ * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
|
||
+ * parameter. If this filter is used, the slave driver, after calling
|
||
+ * dma_request_channel(), will also have to call dmaengine_slave_config() with
|
||
+ * .slave_id, .direction, and either .src_addr or .dst_addr set.
|
||
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
|
||
+ * capability! If this becomes a requirement, hardware glue drivers, using this
|
||
+ * services would have to provide their own filters, which first would check
|
||
+ * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
|
||
+ * this, and only then, in case of a match, call this common filter.
|
||
+ */
|
||
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
|
||
+{
|
||
+ struct shdma_chan *schan = to_shdma_chan(chan);
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ int slave_id = (int)arg;
|
||
+ int ret;
|
||
+
|
||
+ if (slave_id < 0)
|
||
+ /* No slave requested - arbitrary channel */
|
||
+ return true;
|
||
+
|
||
+ if (slave_id >= slave_num)
|
||
+ return false;
|
||
+
|
||
+ ret = ops->set_slave(schan, slave_id, true);
|
||
+ if (ret < 0)
|
||
+ return false;
|
||
+
|
||
+ return true;
|
||
+}
|
||
+EXPORT_SYMBOL(shdma_chan_filter);
|
||
+
|
||
+static int shdma_alloc_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ struct shdma_chan *schan = to_shdma_chan(chan);
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ struct shdma_desc *desc;
|
||
+ struct shdma_slave *slave = chan->private;
|
||
+ int ret, i;
|
||
+
|
||
+ /*
|
||
+ * This relies on the guarantee from dmaengine that alloc_chan_resources
|
||
+ * never runs concurrently with itself or free_chan_resources.
|
||
+ */
|
||
+ if (slave) {
|
||
+ /* Legacy mode: .private is set in filter */
|
||
+ ret = shdma_setup_slave(schan, slave->slave_id);
|
||
+ if (ret < 0)
|
||
+ goto esetslave;
|
||
+ } else {
|
||
+ schan->slave_id = -EINVAL;
|
||
+ }
|
||
+
|
||
+ schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
|
||
+ sdev->desc_size, GFP_KERNEL);
|
||
+ if (!schan->desc) {
|
||
+ ret = -ENOMEM;
|
||
+ goto edescalloc;
|
||
+ }
|
||
+ schan->desc_num = NR_DESCS_PER_CHANNEL;
|
||
+
|
||
+ for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
|
||
+ desc = ops->embedded_desc(schan->desc, i);
|
||
+ dma_async_tx_descriptor_init(&desc->async_tx,
|
||
+ &schan->dma_chan);
|
||
+ desc->async_tx.tx_submit = shdma_tx_submit;
|
||
+ desc->mark = DESC_IDLE;
|
||
+
|
||
+ list_add(&desc->node, &schan->ld_free);
|
||
+ }
|
||
+
|
||
+ return NR_DESCS_PER_CHANNEL;
|
||
+
|
||
+edescalloc:
|
||
+ if (slave)
|
||
+esetslave:
|
||
+ clear_bit(slave->slave_id, shdma_slave_used);
|
||
+ chan->private = NULL;
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
|
||
+{
|
||
+ struct shdma_desc *desc, *_desc;
|
||
+ /* Is the "exposed" head of a chain acked? */
|
||
+ bool head_acked = false;
|
||
+ dma_cookie_t cookie = 0;
|
||
+ dma_async_tx_callback callback = NULL;
|
||
+ void *param = NULL;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&schan->chan_lock, flags);
|
||
+ list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
|
||
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
|
||
+
|
||
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
|
||
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
|
||
+ desc->mark != DESC_COMPLETED &&
|
||
+ desc->mark != DESC_WAITING);
|
||
+
|
||
+ /*
|
||
+ * queue is ordered, and we use this loop to (1) clean up all
|
||
+ * completed descriptors, and to (2) update descriptor flags of
|
||
+ * any chunks in a (partially) completed chain
|
||
+ */
|
||
+ if (!all && desc->mark == DESC_SUBMITTED &&
|
||
+ desc->cookie != cookie)
|
||
+ break;
|
||
+
|
||
+ if (tx->cookie > 0)
|
||
+ cookie = tx->cookie;
|
||
+
|
||
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
|
||
+ if (schan->dma_chan.completed_cookie != desc->cookie - 1)
|
||
+ dev_dbg(schan->dev,
|
||
+ "Completing cookie %d, expected %d\n",
|
||
+ desc->cookie,
|
||
+ schan->dma_chan.completed_cookie + 1);
|
||
+ schan->dma_chan.completed_cookie = desc->cookie;
|
||
+ }
|
||
+
|
||
+ /* Call callback on the last chunk */
|
||
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
|
||
+ desc->mark = DESC_WAITING;
|
||
+ callback = tx->callback;
|
||
+ param = tx->callback_param;
|
||
+ dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
|
||
+ tx->cookie, tx, schan->id);
|
||
+ BUG_ON(desc->chunks != 1);
|
||
+ break;
|
||
+ }
|
||
+
|
||
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
|
||
+ if (desc->mark == DESC_COMPLETED) {
|
||
+ BUG_ON(tx->cookie < 0);
|
||
+ desc->mark = DESC_WAITING;
|
||
+ }
|
||
+ head_acked = async_tx_test_ack(tx);
|
||
+ } else {
|
||
+ switch (desc->mark) {
|
||
+ case DESC_COMPLETED:
|
||
+ desc->mark = DESC_WAITING;
|
||
+ /* Fall through */
|
||
+ case DESC_WAITING:
|
||
+ if (head_acked)
|
||
+ async_tx_ack(&desc->async_tx);
|
||
+ }
|
||
+ }
|
||
+
|
||
+ dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
|
||
+ tx, tx->cookie);
|
||
+
|
||
+ if (((desc->mark == DESC_COMPLETED ||
|
||
+ desc->mark == DESC_WAITING) &&
|
||
+ async_tx_test_ack(&desc->async_tx)) || all) {
|
||
+ /* Remove from ld_queue list */
|
||
+ desc->mark = DESC_IDLE;
|
||
+
|
||
+ list_move(&desc->node, &schan->ld_free);
|
||
+
|
||
+ if (list_empty(&schan->ld_queue)) {
|
||
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
|
||
+ pm_runtime_put(schan->dev);
|
||
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if (all && !callback)
|
||
+ /*
|
||
+ * Terminating and the loop completed normally: forgive
|
||
+ * uncompleted cookies
|
||
+ */
|
||
+ schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
|
||
+
|
||
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
|
||
+
|
||
+ if (callback)
|
||
+ callback(param);
|
||
+
|
||
+ return callback;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * shdma_chan_ld_cleanup - Clean up link descriptors
|
||
+ *
|
||
+ * Clean up the ld_queue of DMA channel.
|
||
+ */
|
||
+static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
|
||
+{
|
||
+ while (__ld_cleanup(schan, all))
|
||
+ ;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * shdma_free_chan_resources - Free all resources of the channel.
|
||
+ */
|
||
+static void shdma_free_chan_resources(struct dma_chan *chan)
|
||
+{
|
||
+ struct shdma_chan *schan = to_shdma_chan(chan);
|
||
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ LIST_HEAD(list);
|
||
+
|
||
+ /* Protect against ISR */
|
||
+ spin_lock_irq(&schan->chan_lock);
|
||
+ ops->halt_channel(schan);
|
||
+ spin_unlock_irq(&schan->chan_lock);
|
||
+
|
||
+ /* Now no new interrupts will occur */
|
||
+
|
||
+ /* Prepared and not submitted descriptors can still be on the queue */
|
||
+ if (!list_empty(&schan->ld_queue))
|
||
+ shdma_chan_ld_cleanup(schan, true);
|
||
+
|
||
+ if (schan->slave_id >= 0) {
|
||
+ /* The caller is holding dma_list_mutex */
|
||
+ clear_bit(schan->slave_id, shdma_slave_used);
|
||
+ chan->private = NULL;
|
||
+ }
|
||
+
|
||
+ spin_lock_irq(&schan->chan_lock);
|
||
+
|
||
+ list_splice_init(&schan->ld_free, &list);
|
||
+ schan->desc_num = 0;
|
||
+
|
||
+ spin_unlock_irq(&schan->chan_lock);
|
||
+
|
||
+ kfree(schan->desc);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * shdma_add_desc - get, set up and return one transfer descriptor
|
||
+ * @schan: DMA channel
|
||
+ * @flags: DMA transfer flags
|
||
+ * @dst: destination DMA address, incremented when direction equals
|
||
+ * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
|
||
+ * @src: source DMA address, incremented when direction equals
|
||
+ * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
|
||
+ * @len: DMA transfer length
|
||
+ * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
|
||
+ * @direction: needed for slave DMA to decide which address to keep constant,
|
||
+ * equals DMA_MEM_TO_MEM for MEMCPY
|
||
+ * Returns 0 or an error
|
||
+ * Locks: called with desc_lock held
|
||
+ */
|
||
+static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
|
||
+ unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
|
||
+ struct shdma_desc **first, enum dma_transfer_direction direction)
|
||
+{
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ struct shdma_desc *new;
|
||
+ size_t copy_size = *len;
|
||
+
|
||
+ if (!copy_size)
|
||
+ return NULL;
|
||
+
|
||
+ /* Allocate the link descriptor from the free list */
|
||
+ new = shdma_get_desc(schan);
|
||
+ if (!new) {
|
||
+ dev_err(schan->dev, "No free link descriptor available\n");
|
||
+ return NULL;
|
||
+ }
|
||
+
|
||
+ ops->desc_setup(schan, new, *src, *dst, ©_size);
|
||
+
|
||
+ if (!*first) {
|
||
+ /* First desc */
|
||
+ new->async_tx.cookie = -EBUSY;
|
||
+ *first = new;
|
||
+ } else {
|
||
+ /* Other desc - invisible to the user */
|
||
+ new->async_tx.cookie = -EINVAL;
|
||
+ }
|
||
+
|
||
+ dev_dbg(schan->dev,
|
||
+ "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
|
||
+ copy_size, *len, *src, *dst, &new->async_tx,
|
||
+ new->async_tx.cookie);
|
||
+
|
||
+ new->mark = DESC_PREPARED;
|
||
+ new->async_tx.flags = flags;
|
||
+ new->direction = direction;
|
||
+ new->partial = 0;
|
||
+
|
||
+ *len -= copy_size;
|
||
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
|
||
+ *src += copy_size;
|
||
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
|
||
+ *dst += copy_size;
|
||
+
|
||
+ return new;
|
||
+}
|
||
+
|
||
+/*
|
||
+ * shdma_prep_sg - prepare transfer descriptors from an SG list
|
||
+ *
|
||
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
|
||
+ * converted to scatter-gather to guarantee consistent locking and a correct
|
||
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
|
||
+ * logically, the SG list is RAM and the addr variable contains slave address,
|
||
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
|
||
+ * and the SG list contains only one element and points at the source buffer.
|
||
+ */
|
||
+static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
|
||
+ struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
|
||
+ enum dma_transfer_direction direction, unsigned long flags)
|
||
+{
|
||
+ struct scatterlist *sg;
|
||
+ struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
|
||
+ LIST_HEAD(tx_list);
|
||
+ int chunks = 0;
|
||
+ unsigned long irq_flags;
|
||
+ int i;
|
||
+
|
||
+ for_each_sg(sgl, sg, sg_len, i)
|
||
+ chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
|
||
+
|
||
+ /* Have to lock the whole loop to protect against concurrent release */
|
||
+ spin_lock_irqsave(&schan->chan_lock, irq_flags);
|
||
+
|
||
+ /*
|
||
+ * Chaining:
|
||
+ * first descriptor is what user is dealing with in all API calls, its
|
||
+ * cookie is at first set to -EBUSY, at tx-submit to a positive
|
||
+ * number
|
||
+ * if more than one chunk is needed further chunks have cookie = -EINVAL
|
||
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC
|
||
+ * all chunks are linked onto the tx_list head with their .node heads
|
||
+ * only during this function, then they are immediately spliced
|
||
+ * back onto the free list in form of a chain
|
||
+ */
|
||
+ for_each_sg(sgl, sg, sg_len, i) {
|
||
+ dma_addr_t sg_addr = sg_dma_address(sg);
|
||
+ size_t len = sg_dma_len(sg);
|
||
+
|
||
+ if (!len)
|
||
+ goto err_get_desc;
|
||
+
|
||
+ do {
|
||
+ dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
|
||
+ i, sg, len, (unsigned long long)sg_addr);
|
||
+
|
||
+ if (direction == DMA_DEV_TO_MEM)
|
||
+ new = shdma_add_desc(schan, flags,
|
||
+ &sg_addr, addr, &len, &first,
|
||
+ direction);
|
||
+ else
|
||
+ new = shdma_add_desc(schan, flags,
|
||
+ addr, &sg_addr, &len, &first,
|
||
+ direction);
|
||
+ if (!new)
|
||
+ goto err_get_desc;
|
||
+
|
||
+ new->chunks = chunks--;
|
||
+ list_add_tail(&new->node, &tx_list);
|
||
+ } while (len);
|
||
+ }
|
||
+
|
||
+ if (new != first)
|
||
+ new->async_tx.cookie = -ENOSPC;
|
||
+
|
||
+ /* Put them back on the free list, so, they don't get lost */
|
||
+ list_splice_tail(&tx_list, &schan->ld_free);
|
||
+
|
||
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
|
||
+
|
||
+ return &first->async_tx;
|
||
+
|
||
+err_get_desc:
|
||
+ list_for_each_entry(new, &tx_list, node)
|
||
+ new->mark = DESC_IDLE;
|
||
+ list_splice(&tx_list, &schan->ld_free);
|
||
+
|
||
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
|
||
+
|
||
+ return NULL;
|
||
+}
|
||
+
|
||
+static struct dma_async_tx_descriptor *shdma_prep_memcpy(
|
||
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
|
||
+ size_t len, unsigned long flags)
|
||
+{
|
||
+ struct shdma_chan *schan = to_shdma_chan(chan);
|
||
+ struct scatterlist sg;
|
||
+
|
||
+ if (!chan || !len)
|
||
+ return NULL;
|
||
+
|
||
+ BUG_ON(!schan->desc_num);
|
||
+
|
||
+ sg_init_table(&sg, 1);
|
||
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
|
||
+ offset_in_page(dma_src));
|
||
+ sg_dma_address(&sg) = dma_src;
|
||
+ sg_dma_len(&sg) = len;
|
||
+
|
||
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
|
||
+}
|
||
+
|
||
+static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
|
||
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
|
||
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
|
||
+{
|
||
+ struct shdma_chan *schan = to_shdma_chan(chan);
|
||
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
|
||
+ const struct shdma_ops *ops = sdev->ops;
|
||
+ int slave_id = schan->slave_id;
|
||
+ dma_addr_t slave_addr;
|
||
+
|
||
+ if (!chan)
|
||
+ return NULL;
|
||
+
|
||
+ BUG_ON(!schan->desc_num);
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || !sg_len) {
+ dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags);
+}
+
+static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct dma_slave_config *config;
+ unsigned long flags;
+ int ret;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ ops->halt_channel(schan);
+
+ if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+ /* Record partial transfer */
+ struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+ struct shdma_desc, node);
+ desc->partial = ops->get_partial(schan, desc);
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ shdma_chan_ld_cleanup(schan, true);
+ break;
+ case DMA_SLAVE_CONFIG:
+ /*
+ * So far only .slave_id is used, but the slave drivers are
+ * encouraged to also set a transfer direction and an address.
+ */
+ if (!arg)
+ return -EINVAL;
+ /*
+ * We could lock this, but you shouldn't be configuring the
+ * channel, while using it...
+ */
+ config = (struct dma_slave_config *)arg;
+ ret = shdma_setup_slave(schan, config->slave_id);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void shdma_issue_pending(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+
+ spin_lock_irq(&schan->chan_lock);
+ if (schan->pm_state == SHDMA_PM_ESTABLISHED)
+ shdma_chan_xfer_ld_queue(schan);
+ else
+ schan->pm_state = SHDMA_PM_PENDING;
+ spin_unlock_irq(&schan->chan_lock);
+}
+
+static enum dma_status shdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ enum dma_status status;
+ unsigned long flags;
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ /*
+ * If we don't find cookie on the queue, it has been aborted and we have
+ * to report error
+ */
+ if (status != DMA_SUCCESS) {
+ struct shdma_desc *sdesc;
+ status = DMA_ERROR;
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->cookie == cookie) {
+ status = DMA_IN_PROGRESS;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ return status;
+}
+
+/* Called from error IRQ or NMI */
+bool shdma_reset(struct shdma_dev *sdev)
+{
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_chan *schan;
+ unsigned int handled = 0;
+ int i;
+
+ /* Reset all channels */
+ shdma_for_each_chan(schan, sdev, i) {
+ struct shdma_desc *sdesc;
+ LIST_HEAD(dl);
+
+ if (!schan)
+ continue;
+
+ spin_lock(&schan->chan_lock);
+
+ /* Stop the channel */
+ ops->halt_channel(schan);
+
+ list_splice_init(&schan->ld_queue, &dl);
+
+ if (!list_empty(&dl)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ }
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ spin_unlock(&schan->chan_lock);
+
+ /* Complete all */
+ list_for_each_entry(sdesc, &dl, node) {
+ struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+ sdesc->mark = DESC_IDLE;
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+ }
+
+ spin_lock(&schan->chan_lock);
+ list_splice(&dl, &schan->ld_free);
+ spin_unlock(&schan->chan_lock);
+
+ handled++;
+ }
+
+ return !!handled;
+}
+EXPORT_SYMBOL(shdma_reset);
+
+static irqreturn_t chan_irq(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ irqreturn_t ret;
+
+ spin_lock(&schan->chan_lock);
+
+ ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
+
+ spin_unlock(&schan->chan_lock);
+
+ return ret;
+}
+
+static irqreturn_t chan_irqt(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ struct shdma_desc *sdesc;
+
+ spin_lock_irq(&schan->chan_lock);
+ list_for_each_entry(sdesc, &schan->ld_queue, node) {
+ if (sdesc->mark == DESC_SUBMITTED &&
+ ops->desc_completed(schan, sdesc)) {
+ dev_dbg(schan->dev, "done #%d@%p\n",
+ sdesc->async_tx.cookie, &sdesc->async_tx);
+ sdesc->mark = DESC_COMPLETED;
+ break;
+ }
+ }
+ /* Next desc */
+ shdma_chan_xfer_ld_queue(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ return IRQ_HANDLED;
+}
+
+int shdma_request_irq(struct shdma_chan *schan, int irq,
+ unsigned long flags, const char *name)
+{
+ int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
+ flags, name, schan);
+
+ schan->irq = ret < 0 ? ret : irq;
+
+ return ret;
+}
+EXPORT_SYMBOL(shdma_request_irq);
+
+void shdma_free_irq(struct shdma_chan *schan)
+{
+ if (schan->irq >= 0)
+ free_irq(schan->irq, schan);
+}
+EXPORT_SYMBOL(shdma_free_irq);
+
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id)
+{
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
+ schan->dma_chan.device = &sdev->dma_dev;
+ dma_cookie_init(&schan->dma_chan);
+
+ schan->dev = sdev->dma_dev.dev;
+ schan->id = id;
+
+ if (!schan->max_xfer_len)
+ schan->max_xfer_len = PAGE_SIZE;
+
+ spin_lock_init(&schan->chan_lock);
+
+ /* Init descripter manage list */
+ INIT_LIST_HEAD(&schan->ld_queue);
+ INIT_LIST_HEAD(&schan->ld_free);
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&schan->dma_chan.device_node,
+ &sdev->dma_dev.channels);
+ sdev->schan[sdev->dma_dev.chancnt++] = schan;
+}
+EXPORT_SYMBOL(shdma_chan_probe);
+
+void shdma_chan_remove(struct shdma_chan *schan)
+{
+ list_del(&schan->dma_chan.device_node);
+}
+EXPORT_SYMBOL(shdma_chan_remove);
+
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num)
+{
+ struct dma_device *dma_dev = &sdev->dma_dev;
+
+ /*
+ * Require all call-backs for now, they can trivially be made optional
+ * later as required
+ */
+ if (!sdev->ops ||
+ !sdev->desc_size ||
+ !sdev->ops->embedded_desc ||
+ !sdev->ops->start_xfer ||
+ !sdev->ops->setup_xfer ||
+ !sdev->ops->set_slave ||
+ !sdev->ops->desc_setup ||
+ !sdev->ops->slave_addr ||
+ !sdev->ops->channel_busy ||
+ !sdev->ops->halt_channel ||
+ !sdev->ops->desc_completed)
+ return -EINVAL;
+
+ sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
+ if (!sdev->schan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = shdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = shdma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
+ dma_dev->device_tx_status = shdma_tx_status;
+ dma_dev->device_issue_pending = shdma_issue_pending;
+
+ /* Compulsory for DMA_SLAVE fields */
+ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_control = shdma_control;
+
+ dma_dev->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(shdma_init);
+
+void shdma_cleanup(struct shdma_dev *sdev)
+{
+ kfree(sdev->schan);
+}
+EXPORT_SYMBOL(shdma_cleanup);
+
+static int __init shdma_enter(void)
+{
+ shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
+ sizeof(long), GFP_KERNEL);
+ if (!shdma_slave_used)
+ return -ENOMEM;
+ return 0;
+}
+module_init(shdma_enter);
+
+static void __exit shdma_exit(void)
+{
+ kfree(shdma_slave_used);
+}
+module_exit(shdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver base library");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff -urN linux-3.0.101/drivers/dma/sh/shdma.c linux-3.0.101.xm510/drivers/dma/sh/shdma.c
--- linux-3.0.101/drivers/dma/sh/shdma.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sh/shdma.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,955 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * base is drivers/dma/flsdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - DMA of SuperH does not have Hardware DMA chain mode.
+ * - MAX DMA size is 16MB.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
+
+#include "../dmaengine.h"
+#include "shdma.h"
+
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+}
+
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+ __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+ return __raw_readl(sh_dc->base + reg / sizeof(u32));
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR register
+ */
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev);
+ dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+}
+
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
+ dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+ dmaor = dmaor_read(shdev);
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+ if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+ dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+ return -EIO;
+ }
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->shdma_dev.dma_dev.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
+ return 0;
+}
+
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = chcr_read(sh_chan);
+
+ if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+ ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+ if (cnt >= pdata->ts_shift_num)
+ cnt = 0;
+
+ return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ for (i = 0; i < pdata->ts_shift_num; i++)
+ if (pdata->ts_shift[i] == l2size)
+ break;
+
+ if (i == pdata->ts_shift_num)
+ i = 0;
+
+ return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+ ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+{
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
+ sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ /*
+ * Default configuration for dual address memory-memory transfer.
+ * 0x400 represents auto-request.
+ */
+ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ LOG2_DEFAULT_XFER_SIZE);
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+ chcr_write(sh_chan, chcr);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+ /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+ chcr_write(sh_chan, val);
+
+ return 0;
+}
+
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+ u16 __iomem *addr = shdev->dmars;
+ unsigned int shift = chan_pdata->dmars_bit;
+
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ if (pdata->no_dmars)
+ return 0;
+
+ /* in the case of a missing DMARS resource use first memory window */
+ if (!addr)
+ addr = (u16 __iomem *)shdev->chan_reg;
+ addr += chan_pdata->dmars / sizeof(u16);
+
+ __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+ addr);
+
+ return 0;
+}
+
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+ sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+ sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sh_desc->hw);
+ dmae_start(sh_chan);
+}
+
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ return dmae_is_busy(sh_chan);
+}
+
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg =
+ sh_chan->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+}
+
+static const struct sh_dmae_slave_config *dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, int slave_id)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_slave_config *cfg;
+ int i;
+
+ if (slave_id >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+ int slave_id, bool try)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+ if (!cfg)
+ return -ENXIO;
+
+ if (!try)
+ sh_chan->config = cfg;
+
+ return 0;
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
+}
+
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ sh_desc->hw.sar = src;
+ sh_desc->hw.dar = dst;
+ sh_desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static void sh_dmae_halt(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ dmae_halt(sh_chan);
+}
+
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (!(chcr_read(sh_chan) & CHCR_TE))
+ return false;
+
+ /* DMA stop */
+ dmae_halt(sh_chan);
+
+ return true;
+}
+
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+ sh_chan->xmit_shift;
+}
+
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+{
+ bool ret;
+
+ /* halt the dma controller */
+ sh_dmae_ctl_stop(shdev);
+
+ /* We cannot detect, which channel caused the error, have to reset all */
+ ret = shdma_reset(&shdev->shdma_dev);
+
+ sh_dmae_rst(shdev);
+
+ return ret;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+ struct sh_dmae_device *shdev = data;
+
+ if (!(dmaor_read(shdev) & DMAOR_AE))
+ return IRQ_NONE;
+
+ sh_dmae_reset(shdev);
+ return IRQ_HANDLED;
+}
+
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+
+ return (sdesc->direction == DMA_DEV_TO_MEM &&
+ (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+ (sdesc->direction != DMA_DEV_TO_MEM &&
+ (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+}
+
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+ /* Fast path out if NMIF is not asserted for this controller */
+ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+ return false;
+
+ return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+ unsigned long cmd, void *data)
+{
+ struct sh_dmae_device *shdev;
+ int ret = NOTIFY_DONE;
+ bool triggered;
+
+ /*
+ * Only concern ourselves with NMI events.
+ *
+ * Normally we would check the die chain value, but as this needs
+ * to be architecture independent, check for NMI context instead.
+ */
+ if (!in_nmi())
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+ /*
+ * Only stop if one of the controllers has NMIF asserted,
+ * we do not want to interfere with regular address error
+ * handling or NMI events that don't concern the DMACs.
+ */
+ triggered = sh_dmae_nmi_notify(shdev);
+ if (triggered == true)
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+ .notifier_call = sh_dmae_nmi_handler,
+
+ /* Run before NMI debug handler and KGDB */
+ .priority = 1,
+};
+
+static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+ int irq, unsigned long flags)
+{
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ struct shdma_dev *sdev = &shdev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sh_dmae_chan *sh_chan;
+ struct shdma_chan *schan;
+ int err;
+
+ sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ if (!sh_chan) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sh_chan->shdma_chan;
+ schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dma%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq error %d\n",
+ id, err);
+ goto err_no_irq;
+ }
+
+ shdev->chan[id] = sh_chan;
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sh_chan->shdma_chan);
+
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ sh_dmae_ctl_stop(shdev);
+}
+
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
+
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+ if (!sh_chan->shdma_chan.desc_num)
+ continue;
+
+ if (sh_chan->shdma_chan.slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg = sh_chan->config;
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+ }
+
+ return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+ .suspend = sh_dmae_suspend,
+ .resume = sh_dmae_resume,
+ .runtime_suspend = sh_dmae_runtime_suspend,
+ .runtime_resume = sh_dmae_runtime_resume,
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+
+ /*
+ * Implicit BUG_ON(!sh_chan->config)
+ * This is an exclusive slave DMA operation, may only be called after a
+ * successful slave configuration.
+ */
+ return sh_chan->config->addr;
+}
+
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sh_dmae_shdma_ops = {
+ .desc_completed = sh_dmae_desc_completed,
+ .halt_channel = sh_dmae_halt,
+ .channel_busy = sh_dmae_channel_busy,
+ .slave_addr = sh_dmae_slave_addr,
+ .desc_setup = sh_dmae_desc_setup,
+ .set_slave = sh_dmae_set_slave,
+ .setup_xfer = sh_dmae_setup_xfer,
+ .start_xfer = sh_dmae_start_xfer,
+ .embedded_desc = sh_dmae_embedded_desc,
+ .chan_irq = sh_dmae_chan_irq,
+ .get_partial = sh_dmae_get_partial,
+};
+
+static int sh_dmae_probe(struct platform_device *pdev)
+{
+ struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ unsigned long irqflags = IRQF_DISABLED,
+ chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+ int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+ struct sh_dmae_device *shdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+
+ /* get platform data */
+ if (!pdata || !pdata->channel_num)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* DMARS area is optional */
+ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /*
+ * IRQ resources:
+ * 1. there always must be at least one IRQ IO-resource. On SH4 it is
+ * the error IRQ, in which case it is the only IRQ in this resource:
+ * start == end. If it is the only IRQ resource, all channels also
+ * use the same IRQ.
+ * 2. DMA channel IRQ resources can be specified one per resource or in
+ * ranges (start != end)
+ * 3. iff all events (channels and, optionally, error) on this
+ * controller use the same IRQ, only one IRQ resource can be
+ * specified, otherwise there must be one IRQ per channel, even if
+ * some of them are equal
+ * 4. if all IRQs on this controller are equal or if some specific IRQs
+ * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+ * requested with the IRQF_SHARED flag
+ */
+ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !errirq_res)
+ return -ENODEV;
+
+ if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC register region already claimed\n");
+ return -EBUSY;
+ }
+
+ if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
+ err = -EBUSY;
+ goto ermrdmars;
+ }
+
+ err = -ENOMEM;
+ shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ if (!shdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ goto ealloc;
+ }
+
+ dma_dev = &shdev->shdma_dev.dma_dev;
+
+ shdev->chan_reg = ioremap(chan->start, resource_size(chan));
+ if (!shdev->chan_reg)
+ goto emapchan;
+ if (dmars) {
+ shdev->dmars = ioremap(dmars->start, resource_size(dmars));
+ if (!shdev->dmars)
+ goto emapdmars;
+ }
+
+ if (!pdata->slave_only)
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ if (pdata->slave && pdata->slave_num)
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* Default transfer size of 32 bytes requires 32-byte alignment */
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+ shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+ shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+ err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+ pdata->channel_num);
+ if (err < 0)
+ goto eshdma;
+
+ /* platform data */
+ shdev->pdata = pdev->dev.platform_data;
+
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
+ platform_set_drvdata(pdev, shdev);
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ /* reset dma controller - only needed as a test */
+ err = sh_dmae_rst(shdev);
+ if (err)
+ goto rst_err;
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
+
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+ irqflags = IRQF_SHARED;
+
+ errirq = errirq_res->start;
+
+ err = request_irq(errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
+ }
+
+#else
+ chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+
+ if (chanirq_res->start == chanirq_res->end &&
+ !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+ /* Special case - all multiplexed */
+ for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+ if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+ chan_irq[irq_cnt] = chanirq_res->start;
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ } else {
+ irq_cap = 1;
+ break;
+ }
+ }
+ } else {
+ do {
+ for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
+ if ((errirq_res->flags & IORESOURCE_BITS) ==
+ IORESOURCE_IRQ_SHAREABLE)
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ else
+ chan_flag[irq_cnt] = IRQF_DISABLED;
+ dev_dbg(&pdev->dev,
+ "Found IRQ %d for channel %d\n",
+ i, irq_cnt);
+ chan_irq[irq_cnt++] = i;
+ }
+
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+ break;
+
+ chanirq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, ++irqres);
+ } while (irq_cnt < pdata->channel_num && chanirq_res);
+ }
+
+ /* Create DMA Channel */
+ for (i = 0; i < irq_cnt; i++) {
+ err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ if (irq_cap)
+ dev_notice(&pdev->dev, "Attempting to register %d DMA "
+ "channels when a maximum of %d are supported.\n",
+ pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+
+ pm_runtime_put(&pdev->dev);
+
+ err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto edmadevreg;
+
+ return err;
+
+edmadevreg:
+ pm_runtime_get(&pdev->dev);
+
+chan_probe_err:
+ sh_dmae_chan_remove(shdev);
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ free_irq(errirq, shdev);
+eirq_err:
+#endif
+rst_err:
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&shdev->shdma_dev);
+eshdma:
+ if (dmars)
+ iounmap(shdev->dmars);
+emapdmars:
+ iounmap(shdev->chan_reg);
+ synchronize_rcu();
+emapchan:
+ kfree(shdev);
+ealloc:
+ if (dmars)
+ release_mem_region(dmars->start, resource_size(dmars));
+ermrdmars:
+ release_mem_region(chan->start, resource_size(chan));
+
+ return err;
+}
+
+static int sh_dmae_remove(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct resource *res;
+ int errirq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(dma_dev);
+
+ if (errirq > 0)
+ free_irq(errirq, shdev);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ sh_dmae_chan_remove(shdev);
+ shdma_cleanup(&shdev->shdma_dev);
+
+ if (shdev->dmars)
+ iounmap(shdev->dmars);
+ iounmap(shdev->chan_reg);
+
+ platform_set_drvdata(pdev, NULL);
+
+ synchronize_rcu();
+ kfree(shdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static struct platform_driver sh_dmae_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .pm = &sh_dmae_pm,
+ .name = SH_DMAE_DRV_NAME,
+ },
+ .remove = sh_dmae_remove,
+ .shutdown = sh_dmae_shutdown,
+};
+
+static int __init sh_dmae_init(void)
+{
+ /* Wire up NMI handling */
+ int err = register_die_notifier(&sh_dmae_nmi_notifier);
+ if (err)
+ return err;
+
+ return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+ platform_driver_unregister(&sh_dmae_driver);
+
+ unregister_die_notifier(&sh_dmae_nmi_notifier);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
diff -urN linux-3.0.101/drivers/dma/sh/shdma.h linux-3.0.101.xm510/drivers/dma/sh/shdma.h
--- linux-3.0.101/drivers/dma/sh/shdma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sh/shdma.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,64 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_SHDMA_H
+#define __DMA_SHDMA_H
+
+#include <linux/sh_dma.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
+
+struct device;
+
+struct sh_dmae_chan {
+ struct shdma_chan shdma_chan;
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ u32 __iomem *base;
+ char dev_id[16]; /* unique name per DMAC of channel */
+ int pm_error;
+};
+
+struct sh_dmae_device {
+ struct shdma_dev shdma_dev;
+ struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
+ struct sh_dmae_pdata *pdata;
+ struct list_head node;
+ u32 __iomem *chan_reg;
+ u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
+};
+
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+ struct sh_dmae_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
+#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
+#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+ struct sh_dmae_device, shdma_dev.dma_dev)
+
+#endif /* __DMA_SHDMA_H */
diff -urN linux-3.0.101/drivers/dma/sh/sudmac.c linux-3.0.101.xm510/drivers/dma/sh/sudmac.c
--- linux-3.0.101/drivers/dma/sh/sudmac.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/sh/sudmac.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,428 @@
+/*
+ * Renesas SUDMAC support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * based on drivers/dma/sh/shdma.c:
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/sudmac.h>
+
+struct sudmac_chan {
+ struct shdma_chan shdma_chan;
+ void __iomem *base;
+ char dev_id[16]; /* unique name per DMAC of channel */
+
+ u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */
+ u32 cfg;
+ u32 dint_end_bit;
+};
+
+struct sudmac_device {
+ struct shdma_dev shdma_dev;
+ struct sudmac_pdata *pdata;
+ void __iomem *chan_reg;
+};
+
+struct sudmac_regs {
+ u32 base_addr;
+ u32 base_byte_count;
+};
+
+struct sudmac_desc {
+ struct sudmac_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
+#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct sudmac_device, shdma_dev.dma_dev)
+
+/* SUDMAC register */
+#define SUDMAC_CH0CFG 0x00
+#define SUDMAC_CH0BA 0x10
+#define SUDMAC_CH0BBC 0x18
+#define SUDMAC_CH0CA 0x20
+#define SUDMAC_CH0CBC 0x28
+#define SUDMAC_CH0DEN 0x30
+#define SUDMAC_DSTSCLR 0x38
+#define SUDMAC_DBUFCTRL 0x3C
+#define SUDMAC_DINTCTRL 0x40
+#define SUDMAC_DINTSTS 0x44
+#define SUDMAC_DINTSTSCLR 0x48
+#define SUDMAC_CH0SHCTRL 0x50
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
+#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */
+
+#define SUDMAC_DRV_NAME "sudmac"
+
+static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
+{
+ iowrite32(data, sc->base + reg);
+}
+
+static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
+{
+ return ioread32(sc->base + reg);
+}
+
+static bool sudmac_is_busy(struct sudmac_chan *sc)
+{
+ u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
+
+ if (den)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
+ struct shdma_desc *sdesc)
+{
+ sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
+ sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
+ sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
+}
+
+static void sudmac_start(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
+}
+
+static void sudmac_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ sudmac_set_reg(sc, &sd->hw, sdesc);
+ sudmac_start(sc);
+}
+
+static bool sudmac_channel_busy(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ return sudmac_is_busy(sc);
+}
+
+static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static const struct sudmac_slave_config *sudmac_find_slave(
+ struct sudmac_chan *sc, int slave_id)
+{
+ struct sudmac_device *sdev = to_sdev(sc);
+ struct sudmac_pdata *pdata = sdev->pdata;
+ const struct sudmac_slave_config *cfg;
+ int i;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
+
+ if (!cfg)
+ return -ENODEV;
+
+ return 0;
+}
+
+static inline void sudmac_dma_halt(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
+ sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
+}
+
+static int sudmac_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
+ __func__, src, dst, *len);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ if (dst)
+ sd->hw.base_addr = dst;
+ else if (src)
+ sd->hw.base_addr = src;
+ sd->hw.base_byte_count = *len;
+
+ return 0;
+}
+
+static void sudmac_halt(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ sudmac_dma_halt(sc);
+}
+
+static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
+
+ if (!(dintsts & sc->dint_end_bit))
+ return false;
+
+ /* DMA stop */
+ sudmac_dma_halt(sc);
+
+ return true;
+}
+
+static size_t sudmac_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
+
+ return sd->hw.base_byte_count - current_byte_count;
+}
+
+static bool sudmac_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
+
+ return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
+}
+
+static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
+ unsigned long flags)
+{
+ struct shdma_dev *sdev = &su_dev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sudmac_chan *sc;
+ struct shdma_chan *schan;
+ int err;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
+ if (!sc) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sc->shdma_chan;
+ schan->max_xfer_len = 64 * 1024 * 1024 - 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sc->base = su_dev->chan_reg;
+
+ /* get platform_data */
+ sc->offset = su_dev->pdata->channel->offset;
+ if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
+ sc->cfg |= SUDMAC_SENDBUFM;
+ if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
+ sc->cfg |= SUDMAC_RCVENDM;
+ sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
+
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
+ sc->dint_end_bit |= SUDMAC_CH0ENDE;
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
+ sc->dint_end_bit |= SUDMAC_CH1ENDE;
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
+ pdev->id, id);
+ else
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sc->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq failed %d\n", id, err);
+ goto err_no_irq;
+ }
+
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ return err;
+}
+
+static void sudmac_chan_remove(struct sudmac_device *su_dev)
+{
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
+ struct sudmac_chan *sc = to_chan(schan);
+
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sc->shdma_chan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
+{
+ /* SUDMAC doesn't need the address */
+ return 0;
+}
+
+static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
+{
+ return &((struct sudmac_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sudmac_shdma_ops = {
+ .desc_completed = sudmac_desc_completed,
+ .halt_channel = sudmac_halt,
+ .channel_busy = sudmac_channel_busy,
+ .slave_addr = sudmac_slave_addr,
+ .desc_setup = sudmac_desc_setup,
+ .set_slave = sudmac_set_slave,
+ .setup_xfer = sudmac_setup_xfer,
+ .start_xfer = sudmac_start_xfer,
+ .embedded_desc = sudmac_embedded_desc,
+ .chan_irq = sudmac_chan_irq,
+ .get_partial = sudmac_get_partial,
+};
+
+static int sudmac_probe(struct platform_device *pdev)
+{
+ struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ int err, i;
+ struct sudmac_device *su_dev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *irq_res;
+
+ /* get platform data */
+ if (!pdata)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !irq_res)
+ return -ENODEV;
+
+ err = -ENOMEM;
+ su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
+ GFP_KERNEL);
+ if (!su_dev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return err;
+ }
+
+ dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
+ if (!su_dev->chan_reg)
+ return err;
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ su_dev->shdma_dev.ops = &sudmac_shdma_ops;
+ su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
+ err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
+ if (err < 0)
+ return err;
+
+ /* platform data */
+ su_dev->pdata = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, su_dev);
+
+ /* Create DMA Channel */
+ for (i = 0; i < pdata->channel_num; i++) {
+ err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto chan_probe_err;
+
+ return err;
+
+chan_probe_err:
+ sudmac_chan_remove(su_dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&su_dev->shdma_dev);
+
+ return err;
+}
+
+static int sudmac_remove(struct platform_device *pdev)
+{
+ struct sudmac_device *su_dev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+ sudmac_chan_remove(su_dev);
+ shdma_cleanup(&su_dev->shdma_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sudmac_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = SUDMAC_DRV_NAME,
+ },
+ .probe = sudmac_probe,
+ .remove = sudmac_remove,
+};
+module_platform_driver(sudmac_driver);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SUDMAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
diff -urN linux-3.0.101/drivers/dma/sirf-dma.c linux-3.0.101.xm510/drivers/dma/sirf-dma.c
|
||
--- linux-3.0.101/drivers/dma/sirf-dma.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/sirf-dma.c 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,789 @@
|
||
+/*
|
||
+ * DMA controller driver for CSR SiRFprimaII
|
||
+ *
|
||
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
||
+ *
|
||
+ * Licensed under GPLv2 or later.
|
||
+ */
|
||
+
|
||
+#include <linux/module.h>
|
||
+#include <linux/dmaengine.h>
|
||
+#include <linux/dma-mapping.h>
|
||
+#include <linux/interrupt.h>
|
||
+#include <linux/io.h>
|
||
+#include <linux/slab.h>
|
||
+#include <linux/of_irq.h>
|
||
+#include <linux/of_address.h>
|
||
+#include <linux/of_device.h>
|
||
+#include <linux/of_platform.h>
|
||
+#include <linux/clk.h>
|
||
+#include <linux/sirfsoc_dma.h>
|
||
+
|
||
+#include "dmaengine.h"
|
||
+
|
||
+#define SIRFSOC_DMA_DESCRIPTORS 16
|
||
+#define SIRFSOC_DMA_CHANNELS 16
|
||
+
|
||
+#define SIRFSOC_DMA_CH_ADDR 0x00
|
||
+#define SIRFSOC_DMA_CH_XLEN 0x04
|
||
+#define SIRFSOC_DMA_CH_YLEN 0x08
|
||
+#define SIRFSOC_DMA_CH_CTRL 0x0C
|
||
+
|
||
+#define SIRFSOC_DMA_WIDTH_0 0x100
|
||
+#define SIRFSOC_DMA_CH_VALID 0x140
|
||
+#define SIRFSOC_DMA_CH_INT 0x144
|
||
+#define SIRFSOC_DMA_INT_EN 0x148
|
||
+#define SIRFSOC_DMA_INT_EN_CLR 0x14C
|
||
+#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
|
||
+#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C
|
||
+
|
||
+#define SIRFSOC_DMA_MODE_CTRL_BIT 4
|
||
+#define SIRFSOC_DMA_DIR_CTRL_BIT 5
|
||
+
|
||
+/* xlen and dma_width register is in 4 bytes boundary */
|
||
+#define SIRFSOC_DMA_WORD_LEN 4
|
||
+
|
||
+struct sirfsoc_dma_desc {
|
||
+ struct dma_async_tx_descriptor desc;
|
||
+ struct list_head node;
|
||
+
|
||
+ /* SiRFprimaII 2D-DMA parameters */
|
||
+
|
||
+ int xlen; /* DMA xlen */
|
||
+ int ylen; /* DMA ylen */
|
||
+ int width; /* DMA width */
|
||
+ int dir;
|
||
+ bool cyclic; /* is loop DMA? */
|
||
+ u32 addr; /* DMA buffer address */
|
||
+};
|
||
+
|
||
+struct sirfsoc_dma_chan {
|
||
+ struct dma_chan chan;
|
||
+ struct list_head free;
|
||
+ struct list_head prepared;
|
||
+ struct list_head queued;
|
||
+ struct list_head active;
|
||
+ struct list_head completed;
|
||
+ unsigned long happened_cyclic;
|
||
+ unsigned long completed_cyclic;
|
||
+
|
||
+ /* Lock for this structure */
|
||
+ spinlock_t lock;
|
||
+
|
||
+ int mode;
|
||
+};
|
||
+
|
||
+struct sirfsoc_dma {
|
||
+ struct dma_device dma;
|
||
+ struct tasklet_struct tasklet;
|
||
+ struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
|
||
+ void __iomem *base;
|
||
+ int irq;
|
||
+ struct clk *clk;
|
||
+ bool is_marco;
|
||
+};
|
||
+
|
||
+#define DRV_NAME "sirfsoc_dma"
|
||
+
|
||
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
|
||
+static inline
|
||
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
|
||
+{
|
||
+ return container_of(c, struct sirfsoc_dma_chan, chan);
|
||
+}
|
||
+
|
||
+/* Convert struct dma_chan to struct sirfsoc_dma */
|
||
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
|
||
+{
|
||
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
|
||
+ return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
|
||
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+
+ /*
+ * The lock is already held by the callers of this function,
+ * so we don't take it again here.
+ */
+
+ sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+ node);
+ /* Move the first queued descriptor to active list */
+ list_move_tail(&sdesc->node, &schan->active);
+
+ /* Start the DMA transfer */
+ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+ cid * 4);
+ writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+ (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+ sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+ (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /*
+ * writel has an implicit memory write barrier to make sure data is
+ * flushed into memory before starting DMA
+ */
+ writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+ if (sdesc->cyclic) {
+ writel((1 << cid) | 1 << (cid + 16) |
+ readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ schan->happened_cyclic = schan->completed_cyclic = 0;
+ }
+}
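+/*
+ * SIRFSOC_DMA_CH_LOOP_CTRL layout as used above: bit cid and bit
+ * (cid + 16) are both set to start looping and both cleared to pause or
+ * terminate it (see sirfsoc_dma_pause_chan()); for cid = 2 the mask is
+ * (1 << 2) | (1 << 18) = 0x40004.
+ */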
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+ struct sirfsoc_dma *sdma = data;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ u32 is;
+ int ch;
+
+ is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+ while ((ch = fls(is) - 1) >= 0) {
+ is &= ~(1 << ch);
+ writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+ schan = &sdma->channels[ch];
+
+ spin_lock(&schan->lock);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ if (!sdesc->cyclic) {
+ /* Execute queued descriptors */
+ list_splice_tail_init(&schan->active, &schan->completed);
+ if (!list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+ } else
+ schan->happened_cyclic++;
+
+ spin_unlock(&schan->lock);
+ }
+
+ /* Schedule tasklet */
+ tasklet_schedule(&sdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+ dma_cookie_t last_cookie = 0;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ unsigned long happened_cyclic;
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < sdma->dma.chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ if (!list_empty(&schan->completed)) {
+ list_splice_tail_init(&schan->completed, &list);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(sdesc, &list, node) {
+ desc = &sdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&list, &schan->free);
+ schan->chan.completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+ } else {
+ /* for cyclic channel, desc is always in active list */
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+
+ if (!sdesc || (sdesc && !sdesc->cyclic)) {
+ /* without active cyclic DMA */
+ spin_unlock_irqrestore(&schan->lock, flags);
+ continue;
+ }
+
+ /* cyclic DMA */
+ happened_cyclic = schan->happened_cyclic;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ desc = &sdesc->desc;
+ while (happened_cyclic != schan->completed_cyclic) {
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+ schan->completed_cyclic++;
+ }
+ }
+ }
+}
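+/*
+ * Cyclic bookkeeping: the hard IRQ only increments happened_cyclic;
+ * this function, run from the tasklet, catches completed_cyclic up to
+ * it and invokes the client callback once per elapsed period.
+ */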
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+ struct sirfsoc_dma *sdma = (void *)data;
+
+ sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&sdesc->node, &schan->queued);
+
+ cookie = dma_cookie_assign(txd);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+ struct dma_slave_config *config)
+{
+ unsigned long flags;
+
+ if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco) {
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ } else {
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+ }
+
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+ list_splice_tail_init(&schan->active, &schan->free);
+ list_splice_tail_init(&schan->queued, &schan->free);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ | ((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config;
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ return sirfsoc_dma_pause_chan(schan);
+ case DMA_RESUME:
+ return sirfsoc_dma_resume_chan(schan);
+ case DMA_TERMINATE_ALL:
+ return sirfsoc_dma_terminate_all(schan);
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return sirfsoc_dma_slave_config(schan, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+ sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+ if (!sdesc) {
+ dev_notice(sdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&sdesc->desc, chan);
+ sdesc->desc.flags = DMA_CTRL_ACK;
+ sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+ list_add_tail(&sdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ list_splice_tail_init(&descs, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&schan->prepared));
+ BUG_ON(!list_empty(&schan->queued));
+ BUG_ON(!list_empty(&schan->active));
+ BUG_ON(!list_empty(&schan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&schan->free, &descs);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(sdesc, tmp, &descs, node)
+ kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (list_empty(&schan->active) && !list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+ enum dma_status ret;
+ struct sirfsoc_dma_desc *sdesc;
+ int cid = schan->chan.chan_id;
+ unsigned long dma_pos;
+ unsigned long dma_request_bytes;
+ unsigned long residue;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
+ (sdesc->width * SIRFSOC_DMA_WORD_LEN);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
+ << 2;
+ residue = dma_request_bytes - (dma_pos - sdesc->addr);
+ dma_set_residue(txstate, residue);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return ret;
+}
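+/*
+ * Residue example: for a 4096-byte request with addr = 0x80000000 and
+ * the channel address register reading back 0x80000400 (1024 bytes
+ * already moved), residue = 4096 - (0x80000400 - 0x80000000) = 3072.
+ * CH_ADDR holds a word address, hence the << 2 on readback.
+ */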
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+ int ret;
+
+ if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+ ret = -EINVAL;
+ goto err_dir;
+ }
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc) {
+ /* try to free completed descriptors */
+ sirfsoc_dma_process_completed(sdma);
+ ret = 0;
+ goto no_desc;
+ }
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+
+ /*
+ * The number of chunks in a frame can only be 1 for prima2,
+ * and ylen (the number of frames - 1) must be at least 0
+ */
+ if ((xt->frame_size == 1) && (xt->numf > 0)) {
+ sdesc->cyclic = 0;
+ sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+ sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+ SIRFSOC_DMA_WORD_LEN;
+ sdesc->ylen = xt->numf - 1;
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ sdesc->addr = xt->src_start;
+ sdesc->dir = 1;
+ } else {
+ sdesc->addr = xt->dst_start;
+ sdesc->dir = 0;
+ }
+
+ list_add_tail(&sdesc->node, &schan->prepared);
+ } else {
+ pr_err("sirfsoc DMA Invalid xfer\n");
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+err_xfer:
+ spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+ return ERR_PTR(ret);
+}
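+/*
+ * Hypothetical client sketch (identifiers are illustrative, not part of
+ * this driver): submit 16 frames of 64 words each to a device.
+ *
+ *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
+ *	xt->src_start = buf_phys;	(assumed dma_addr_t of the buffer)
+ *	xt->dir = DMA_MEM_TO_DEV;
+ *	xt->numf = 16;			(ylen becomes 15)
+ *	xt->frame_size = 1;		(required by this driver)
+ *	xt->sgl[0].size = 256;		(bytes per frame: xlen = 64 words)
+ *	xt->sgl[0].icg = 0;		(no gap: width == xlen)
+ *	txd = chan->device->device_prep_interleaved_dma(chan, xt, 0);
+ */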
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+
+ /*
+ * We only support cyclic transfers with two periods.
+ * If the X-length is set to 0, it would be the loop mode.
+ * The DMA address keeps increasing until reaching the end of a loop
+ * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
+ * the DMA address goes back to the beginning of this area.
+ * In loop mode, the DMA data region is divided into two parts, BUFA
+ * and BUFB. The DMA controller generates interrupts twice in each
+ * loop: when the DMA address reaches the end of BUFA and when it
+ * reaches the end of BUFB.
+ */
+ if (buf_len != 2 * period_len)
+ return ERR_PTR(-EINVAL);
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc)
+ return 0;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+ sdesc->addr = addr;
+ sdesc->cyclic = 1;
+ sdesc->xlen = 0;
+ sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+ sdesc->width = 1;
+ list_add_tail(&sdesc->node, &schan->prepared);
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+}
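+/*
+ * Worked example: period_len = 4096 requires buf_len = 8192, giving
+ * ylen = 8192 / 4 - 1 = 2047 with xlen = 0 (loop mode) and width = 1.
+ * The controller then interrupts once at the end of BUFA (4 KiB) and
+ * once at the wrap at the end of BUFB (8 KiB) in every loop.
+ */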
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == chan->chan_id +
+ chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
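+/*
+ * Hypothetical usage sketch (client side, identifiers illustrative):
+ * request global channel number 5 through the filter above.
+ *
+ *	dma_cap_mask_t mask;
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)5);
+ */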
+
+static int sirfsoc_dma_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct sirfsoc_dma *sdma;
+ struct sirfsoc_dma_chan *schan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ u32 id;
+ int ret, i;
+
+ sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+ if (!sdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ if (of_device_is_compatible(dn, "sirf,marco-dmac"))
+ sdma->is_marco = true;
+
+ if (of_property_read_u32(dn, "cell-index", &id)) {
+ dev_err(dev, "Fail to get DMAC index\n");
+ return -ENODEV;
+ }
+
+ sdma->irq = irq_of_parse_and_map(dn, 0);
+ if (sdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ sdma->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sdma->clk)) {
+ dev_err(dev, "failed to get a clock.\n");
+ return PTR_ERR(sdma->clk);
+ }
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret) {
+ dev_err(dev, "Error parsing memory region!\n");
+ goto irq_dispose;
+ }
+
+ regs_start = res.start;
+ regs_size = resource_size(&res);
+
+ sdma->base = devm_ioremap(dev, regs_start, regs_size);
+ if (!sdma->base) {
+ dev_err(dev, "Error mapping memory region!\n");
+ ret = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ ret = -EINVAL;
+ goto irq_dispose;
+ }
+
+ dma = &sdma->dma;
+ dma->dev = dev;
+ dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+ dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+ dma->device_issue_pending = sirfsoc_dma_issue_pending;
+ dma->device_control = sirfsoc_dma_control;
+ dma->device_tx_status = sirfsoc_dma_tx_status;
+ dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+ dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ schan->chan.device = dma;
+ dma_cookie_init(&schan->chan);
+
+ INIT_LIST_HEAD(&schan->free);
+ INIT_LIST_HEAD(&schan->prepared);
+ INIT_LIST_HEAD(&schan->queued);
+ INIT_LIST_HEAD(&schan->active);
+ INIT_LIST_HEAD(&schan->completed);
+
+ spin_lock_init(&schan->lock);
+ list_add_tail(&schan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+ clk_prepare_enable(sdma->clk);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, sdma);
+ ret = dma_async_device_register(dma);
+ if (ret)
+ goto free_irq;
+
+ dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+ return 0;
+
+free_irq:
+ free_irq(sdma->irq, sdma);
+irq_dispose:
+ irq_dispose_mapping(sdma->irq);
+ return ret;
+}
+
+static int sirfsoc_dma_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sdma->clk);
+ dma_async_device_unregister(&sdma->dma);
+ free_irq(sdma->irq, sdma);
+ irq_dispose_mapping(sdma->irq);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+ { .compatible = "sirf,prima2-dmac", },
+ { .compatible = "sirf,marco-dmac", },
+ {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+ .probe = sirfsoc_dma_probe,
+ .remove = sirfsoc_dma_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_dma_match,
+ },
+};
+
+static __init int sirfsoc_dma_init(void)
+{
+ return platform_driver_register(&sirfsoc_dma_driver);
+}
+
+static void __exit sirfsoc_dma_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_dma_driver);
+}
+
+subsys_initcall(sirfsoc_dma_init);
+module_exit(sirfsoc_dma_exit);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
diff -urN linux-3.0.101/drivers/dma/tegra20-apb-dma.c linux-3.0.101.xm510/drivers/dma/tegra20-apb-dma.c
--- linux-3.0.101/drivers/dma/tegra20-apb-dma.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/dma/tegra20-apb-dma.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,1527 @@
+/*
+ * DMA driver for Nvidia's Tegra20 APB DMA controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/clk/tegra.h>
+
+#include "dmaengine.h"
+
+#define TEGRA_APBDMA_GENERAL 0x0
+#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
+
+#define TEGRA_APBDMA_CONTROL 0x010
+#define TEGRA_APBDMA_IRQ_MASK 0x01c
+#define TEGRA_APBDMA_IRQ_MASK_SET 0x020
+
+/* CSR register */
+#define TEGRA_APBDMA_CHAN_CSR 0x00
+#define TEGRA_APBDMA_CSR_ENB BIT(31)
+#define TEGRA_APBDMA_CSR_IE_EOC BIT(30)
+#define TEGRA_APBDMA_CSR_HOLD BIT(29)
+#define TEGRA_APBDMA_CSR_DIR BIT(28)
+#define TEGRA_APBDMA_CSR_ONCE BIT(27)
+#define TEGRA_APBDMA_CSR_FLOW BIT(21)
+#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
+#define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC
+
+/* STATUS register */
+#define TEGRA_APBDMA_CHAN_STATUS 0x004
+#define TEGRA_APBDMA_STATUS_BUSY BIT(31)
+#define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30)
+#define TEGRA_APBDMA_STATUS_HALT BIT(29)
+#define TEGRA_APBDMA_STATUS_PING_PONG BIT(28)
+#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
+#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
+
+#define TEGRA_APBDMA_CHAN_CSRE 0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+
+/* AHB memory address */
+#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
+
+/* AHB sequence register */
+#define TEGRA_APBDMA_CHAN_AHBSEQ 0x14
+#define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28)
+#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27)
+#define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24)
+#define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24)
+#define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24)
+#define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19)
+#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16
+#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0
+
+/* APB address */
+#define TEGRA_APBDMA_CHAN_APBPTR 0x018
+
+/* APB sequence register */
+#define TEGRA_APBDMA_CHAN_APBSEQ 0x01c
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28)
+#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
+#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
+
+/*
+ * If a burst is in flight when the DMA is paused, this is the time (in us)
+ * needed for the in-flight burst to complete and the status register to
+ * update.
+ */
+#define TEGRA_APBDMA_BURST_COMPLETE_TIME 20
+
+/* Channel base address offset from APBDMA base address */
+#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
+
+/* DMA channel register space size */
+#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
+
+struct tegra_dma;
+
+/*
+ * tegra_dma_chip_data: Tegra chip-specific DMA data
+ * @nr_channels: Number of channels available in the controller.
+ * @max_dma_count: Maximum DMA transfer count supported by the controller.
+ * @support_channel_pause: Whether channel-wise pause is supported.
+ */
+struct tegra_dma_chip_data {
+ int nr_channels;
+ int max_dma_count;
+ bool support_channel_pause;
+};
+
+/* DMA channel registers */
+struct tegra_dma_channel_regs {
+ unsigned long csr;
+ unsigned long ahb_ptr;
+ unsigned long apb_ptr;
+ unsigned long ahb_seq;
+ unsigned long apb_seq;
+};
+
+/*
+ * tegra_dma_sg_req: DMA request details used to configure the hardware
+ * for one sub-transfer. A client's data transfer request can be broken
+ * into multiple sub-transfers according to the request parameters and
+ * hardware support. Each sub-transfer is added to the transfer list and
+ * points to the Tegra DMA descriptor that manages the overall transfer.
+ */
+struct tegra_dma_sg_req {
+ struct tegra_dma_channel_regs ch_regs;
+ int req_len;
+ bool configured;
+ bool last_sg;
+ bool half_done;
+ struct list_head node;
+ struct tegra_dma_desc *dma_desc;
+};
+
+/*
+ * tegra_dma_desc: Tegra DMA descriptor that manages a client request.
+ * It keeps track of transfer status, callbacks, request counts, etc.
+ */
+struct tegra_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ int bytes_requested;
+ int bytes_transferred;
+ enum dma_status dma_status;
+ struct list_head node;
+ struct list_head tx_list;
+ struct list_head cb_node;
+ int cb_count;
+};
+
+struct tegra_dma_channel;
+
+typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
+ bool to_terminate);
+
+/* tegra_dma_channel: Channel specific information */
+struct tegra_dma_channel {
+ struct dma_chan dma_chan;
+ char name[30];
+ bool config_init;
+ int id;
+ int irq;
+ unsigned long chan_base_offset;
+ spinlock_t lock;
+ bool busy;
+ struct tegra_dma *tdma;
+ bool cyclic;
+
+ /* Different lists for managing the requests */
+ struct list_head free_sg_req;
+ struct list_head pending_sg_req;
+ struct list_head free_dma_desc;
+ struct list_head cb_desc;
+
+ /* ISR handler and tasklet for bottom half of isr handling */
+ dma_isr_handler isr_handler;
+ struct tasklet_struct tasklet;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ /* Channel-slave specific configuration */
+ struct dma_slave_config dma_sconfig;
+ struct tegra_dma_channel_regs channel_reg;
+};
+
+/* tegra_dma: Tegra DMA specific information */
+struct tegra_dma {
+ struct dma_device dma_dev;
+ struct device *dev;
+ struct clk *dma_clk;
+ spinlock_t global_lock;
+ void __iomem *base_addr;
+ const struct tegra_dma_chip_data *chip_data;
+
+ /* Some registers need to be cached before suspend */
+ u32 reg_gen;
+
+ /* Last member of the structure */
+ struct tegra_dma_channel channels[0];
+};
+
+static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->base_addr + reg);
+}
+
+static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
+{
+ return readl(tdma->base_addr + reg);
+}
+
+static inline void tdc_write(struct tegra_dma_channel *tdc,
+ u32 reg, u32 val)
+{
+ writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
+{
+ return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
+{
+ return container_of(dc, struct tegra_dma_channel, dma_chan);
+}
+
+static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
+ struct dma_async_tx_descriptor *td)
+{
+ return container_of(td, struct tegra_dma_desc, txd);
+}
+
+static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
+{
+ return &tdc->dma_chan.dev->device;
+}
+
+static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static int tegra_dma_runtime_suspend(struct device *dev);
+static int tegra_dma_runtime_resume(struct device *dev);
+
+/* Get DMA desc from free list, if not there then allocate it. */
+static struct tegra_dma_desc *tegra_dma_desc_get(
+ struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ /* Do not allocate if desc are waiting for ack */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+ if (async_tx_test_ack(&dma_desc->txd)) {
+ list_del(&dma_desc->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
+ return dma_desc;
+ }
+ }
+
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ /* Allocate DMA desc */
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
+ dma_desc->txd.tx_submit = tegra_dma_tx_submit;
+ dma_desc->txd.flags = 0;
+ return dma_desc;
+}
+
+static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
+ struct tegra_dma_desc *dma_desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (!list_empty(&dma_desc->tx_list))
+ list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
+ struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sg_req = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (!list_empty(&tdc->free_sg_req)) {
+ sg_req = list_first_entry(&tdc->free_sg_req,
+ typeof(*sg_req), node);
+ list_del(&sg_req->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return sg_req;
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
+ if (!sg_req)
+ dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+ return sg_req;
+}
+
+static int tegra_dma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ if (!list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "Configuration not allowed\n");
+ return -EBUSY;
+ }
+
+ memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ tdc->config_init = true;
+ return 0;
+}
+
+static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ spin_lock(&tdma->global_lock);
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+}
+
+static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ spin_unlock(&tdma->global_lock);
+}
+
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ } else {
+ tegra_dma_global_pause(tdc, wait_for_burst_complete);
+ }
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+ } else {
+ tegra_dma_global_resume(tdc);
+ }
+}
+
+static void tegra_dma_stop(struct tegra_dma_channel *tdc)
+{
+ u32 csr;
+ u32 status;
+
+ /* Disable interrupts */
+ csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
+ csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
+
+ /* Disable DMA */
+ csr &= ~TEGRA_APBDMA_CSR_ENB;
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
+
+ /* Clear interrupt status if it is there */
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
+ }
+ tdc->busy = false;
+}
+
+static void tegra_dma_start(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req)
+{
+ struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
+
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
+}
+
+static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *nsg_req)
+{
+ unsigned long status;
+
+ /*
+ * The DMA controller reloads the new configuration for the next
+ * transfer after the last burst of the current transfer completes.
+ * If there is no EOC status then the last burst has not completed
+ * yet. The last burst may also be in flight: it can complete, but
+ * because the DMA is paused it will neither raise an interrupt nor
+ * reload the new configuration.
+ * If the EOC status is already set, the interrupt handler has to
+ * load the new configuration instead.
+ */
+ tegra_dma_pause(tdc, false);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+
+ /*
+ * If an interrupt is pending then do nothing, as the ISR will
+ * handle the programming of the new request.
+ */
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_err(tdc2dev(tdc),
+ "Skipping new configuration as interrupt is pending\n");
+ tegra_dma_resume(tdc);
+ return;
+ }
+
+ /* Safe to program new configuration */
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
+ nsg_req->configured = true;
+
+ tegra_dma_resume(tdc);
+}
+
+static void tdc_start_head_req(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sg_req;
+
+ if (list_empty(&tdc->pending_sg_req))
+ return;
+
+ sg_req = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sg_req), node);
+ tegra_dma_start(tdc, sg_req);
+ sg_req->configured = true;
+ tdc->busy = true;
+}
+
+static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *hsgreq;
+ struct tegra_dma_sg_req *hnsgreq;
+
+ if (list_empty(&tdc->pending_sg_req))
+ return;
+
+ hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
+ if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
+ hnsgreq = list_first_entry(&hsgreq->node,
+ typeof(*hnsgreq), node);
+ tegra_dma_configure_for_next(tdc, hnsgreq);
+ }
+}
+
+static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req, unsigned long status)
+{
+ return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
+}
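+/*
+ * The CSR word count is programmed as (req_len - 4) & 0xFFFC in the
+ * prep functions below, and STATUS returns the remaining count in the
+ * same encoding, hence transferred = req_len - remaining - 4.
+ */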
+
+static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+
+ while (!list_empty(&tdc->pending_sg_req)) {
+ sgreq = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sgreq), node);
+ list_move_tail(&sgreq->node, &tdc->free_sg_req);
+ if (sgreq->last_sg) {
+ dma_desc = sgreq->dma_desc;
+ dma_desc->dma_status = DMA_ERROR;
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+
+ /* Add in cb list if it is not there. */
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node,
+ &tdc->cb_desc);
+ dma_desc->cb_count++;
+ }
+ }
+ tdc->isr_handler = NULL;
+}
+
+static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
+{
+ struct tegra_dma_sg_req *hsgreq = NULL;
+
+ if (list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "Dma is running without req\n");
+ tegra_dma_stop(tdc);
+ return false;
+ }
+
+ /*
+ * Check that the head request on the list is in flight.
+ * If it is not in flight then abort the transfer, as the
+ * looping transfer cannot continue.
+ */
+ hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
+ if (!hsgreq->configured) {
+ tegra_dma_stop(tdc);
+ dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+ tegra_dma_abort_all(tdc);
+ return false;
+ }
+
+ /* Configure next request */
+ if (!to_terminate)
+ tdc_configure_next_head_desc(tdc);
+ return true;
+}
+
+static void handle_once_dma_done(struct tegra_dma_channel *tdc,
+ bool to_terminate)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+
+ tdc->busy = false;
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
+ dma_desc = sgreq->dma_desc;
+ dma_desc->bytes_transferred += sgreq->req_len;
+
+ list_del(&sgreq->node);
+ if (sgreq->last_sg) {
+ dma_desc->dma_status = DMA_SUCCESS;
+ dma_cookie_complete(&dma_desc->txd);
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
+ dma_desc->cb_count++;
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+ }
+ list_add_tail(&sgreq->node, &tdc->free_sg_req);
+
+ /* Do not start DMA if it is going to be terminated */
+ if (to_terminate || list_empty(&tdc->pending_sg_req))
+ return;
+
+ tdc_start_head_req(tdc);
+ return;
+}
+
+static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
+ bool to_terminate)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+ bool st;
+
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
+ dma_desc = sgreq->dma_desc;
+ dma_desc->bytes_transferred += sgreq->req_len;
+
+ /* Callback needs to be called */
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
+ dma_desc->cb_count++;
+
+ /* If not last req then put at end of pending list */
+ if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
+ list_move_tail(&sgreq->node, &tdc->pending_sg_req);
+ sgreq->configured = false;
+ st = handle_continuous_head_request(tdc, sgreq, to_terminate);
+ if (!st)
+ dma_desc->dma_status = DMA_ERROR;
+ }
+ return;
+}
+
+static void tegra_dma_tasklet(unsigned long data)
+{
+ struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+ int cb_count;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ while (!list_empty(&tdc->cb_desc)) {
+ dma_desc = list_first_entry(&tdc->cb_desc,
+ typeof(*dma_desc), cb_node);
+ list_del(&dma_desc->cb_node);
+ callback = dma_desc->txd.callback;
+ callback_param = dma_desc->txd.callback_param;
+ cb_count = dma_desc->cb_count;
+ dma_desc->cb_count = 0;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ while (cb_count-- && callback)
+ callback(callback_param);
+ spin_lock_irqsave(&tdc->lock, flags);
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
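+/*
+ * The channel lock is dropped around the callback invocations above so
+ * that a client callback can submit new descriptors without
+ * deadlocking; cb_count batches the periods that elapsed while the
+ * tasklet was pending.
+ */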
+
+static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
+{
+ struct tegra_dma_channel *tdc = dev_id;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
+ tdc->isr_handler(tdc, false);
+ tasklet_schedule(&tdc->tasklet);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dev_info(tdc2dev(tdc),
+ "Interrupt already served status 0x%08lx\n", status);
+ return IRQ_NONE;
+}
+
+static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ dma_desc->dma_status = DMA_IN_PROGRESS;
+ cookie = dma_cookie_assign(&dma_desc->txd);
+ list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return cookie;
+}
+
+static void tegra_dma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "No DMA request\n");
+ goto end;
+ }
+ if (!tdc->busy) {
+ tdc_start_head_req(tdc);
+
+ /* Continuous single mode: Configure next req */
+ if (tdc->cyclic) {
+ /*
+ * Wait one burst time before configuring the
+ * DMA for the next transfer.
+ */
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ tdc_configure_next_head_desc(tdc);
+ }
+ }
+end:
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return;
+}
+
+static void tegra_dma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+ unsigned long status;
+ bool was_busy;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (list_empty(&tdc->pending_sg_req)) {
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return;
+ }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+
+ /* Pause DMA before checking the queue status */
+ tegra_dma_pause(tdc, true);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
+ tdc->isr_handler(tdc, true);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ }
+
+ was_busy = tdc->busy;
+ tegra_dma_stop(tdc);
+
+ if (!list_empty(&tdc->pending_sg_req) && was_busy) {
+ sgreq = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sgreq), node);
+ sgreq->dma_desc->bytes_transferred +=
+ get_current_xferred_count(tdc, sgreq, status);
+ }
+ tegra_dma_resume(tdc);
+
+skip_dma_stop:
+ tegra_dma_abort_all(tdc);
+
+ while (!list_empty(&tdc->cb_desc)) {
+ dma_desc = list_first_entry(&tdc->cb_desc,
+ typeof(*dma_desc), cb_node);
+ list_del(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int residual;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_SUCCESS) {
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+
+ /* Check on wait_ack desc status */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+ if (dma_desc->txd.cookie == cookie) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ ret = dma_desc->dma_status;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+ }
+
+ /* Check in pending list */
+ list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
+ dma_desc = sg_req->dma_desc;
+ if (dma_desc->txd.cookie == cookie) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ ret = dma_desc->dma_status;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+ }
+
+ dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+}
+
+static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return tegra_dma_slave_config(dc,
+ (struct dma_slave_config *)arg);
+
+ case DMA_TERMINATE_ALL:
+ tegra_dma_terminate_all(dc);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -ENXIO;
+}
+
+static inline int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
+{
+ switch (slave_bw) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
+ default:
+ dev_warn(tdc2dev(tdc),
+ "slave bw is not supported, using 32bits\n");
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
+ }
+}
+
+static inline int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
+{
+ int burst_byte;
+ int burst_ahb_width;
+
+ /*
+ * The client's burst_size is in units of the bus width;
+ * convert it into the AHB memory width, which is 4 bytes.
+ */
+ burst_byte = burst_size * slave_bw;
+ burst_ahb_width = burst_byte / 4;
+
+ /* If burst size is 0 then calculate the burst size based on length */
+ if (!burst_ahb_width) {
+ if (len & 0xF)
+ return TEGRA_APBDMA_AHBSEQ_BURST_1;
+ else if ((len >> 4) & 0x1)
+ return TEGRA_APBDMA_AHBSEQ_BURST_4;
+ else
+ return TEGRA_APBDMA_AHBSEQ_BURST_8;
+ }
+ if (burst_ahb_width < 4)
+ return TEGRA_APBDMA_AHBSEQ_BURST_1;
+ else if (burst_ahb_width < 8)
+ return TEGRA_APBDMA_AHBSEQ_BURST_4;
+ else
+ return TEGRA_APBDMA_AHBSEQ_BURST_8;
+}
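+/*
+ * Worked example: burst_size = 8 at a 2-byte bus width gives
+ * burst_byte = 16 and burst_ahb_width = 4, selecting
+ * TEGRA_APBDMA_AHBSEQ_BURST_4. With burst_size = 0 and len = 4096
+ * (len & 0xF == 0 and bit 4 clear), the length-based fallback picks
+ * TEGRA_APBDMA_AHBSEQ_BURST_8.
+ */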
+
+static int get_transfer_param(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction, unsigned long *apb_addr,
+ unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
+{
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ *apb_addr = tdc->dma_sconfig.dst_addr;
+ *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
+ *burst_size = tdc->dma_sconfig.dst_maxburst;
+ *slave_bw = tdc->dma_sconfig.dst_addr_width;
+ *csr = TEGRA_APBDMA_CSR_DIR;
+ return 0;
+
+ case DMA_DEV_TO_MEM:
+ *apb_addr = tdc->dma_sconfig.src_addr;
+ *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *slave_bw = tdc->dma_sconfig.src_addr_width;
+ *csr = 0;
+ return 0;
+
+ default:
+ dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
+ struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ unsigned int i;
+ struct scatterlist *sg;
+ unsigned long csr, ahb_seq, apb_ptr, apb_seq;
+ struct list_head req_list;
+ struct tegra_dma_sg_req *sg_req = NULL;
+ u32 burst_size;
+ enum dma_slave_buswidth slave_bw;
+ int ret;
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+ return NULL;
+ }
+ if (sg_len < 1) {
+ dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ INIT_LIST_HEAD(&req_list);
+
+ ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
+ TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
+
+ csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+ dma_desc = tegra_dma_desc_get(tdc);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dma_desc->tx_list);
+ INIT_LIST_HEAD(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+ dma_desc->bytes_requested = 0;
+ dma_desc->bytes_transferred = 0;
+ dma_desc->dma_status = DMA_IN_PROGRESS;
+
+ /* Make transfer requests */
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 len, mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((len & 3) || (mem & 3) ||
+ (len > tdc->tdma->chip_data->max_dma_count)) {
+ dev_err(tdc2dev(tdc),
+ "Dma length/memory address is not supported\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ sg_req = tegra_dma_sg_req_get(tdc);
+ if (!sg_req) {
+ dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ dma_desc->bytes_requested += len;
+
+ sg_req->ch_regs.apb_ptr = apb_ptr;
+ sg_req->ch_regs.ahb_ptr = mem;
+ sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.apb_seq = apb_seq;
+ sg_req->ch_regs.ahb_seq = ahb_seq;
+ sg_req->configured = false;
+ sg_req->last_sg = false;
+ sg_req->dma_desc = dma_desc;
+ sg_req->req_len = len;
+
+ list_add_tail(&sg_req->node, &dma_desc->tx_list);
+ }
+ sg_req->last_sg = true;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
+
+ /*
+ * Make sure the requested mode does not conflict with the
+ * currently configured mode.
+ */
+ if (!tdc->isr_handler) {
+ tdc->isr_handler = handle_once_dma_done;
+ tdc->cyclic = false;
+ } else {
+ if (tdc->cyclic) {
+ dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+ }
+
+ return &dma_desc->txd;
+}
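+/*
+ * Hypothetical client sketch (identifiers illustrative): configure the
+ * slave side, then queue a scatter-gather transfer to a device FIFO.
+ *
+ *	struct dma_slave_config cfg = {
+ *		.dst_addr = fifo_phys,	(assumed device FIFO address)
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst = 8,
+ *		.slave_id = req_sel,	(assumed APB request select)
+ *	};
+ *	dmaengine_slave_config(chan, &cfg);
+ *	txd = chan->device->device_prep_slave_sg(chan, sgl, nents,
+ *			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
+ *	dmaengine_submit(txd);
+ *	dma_async_issue_pending(chan);
+ */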
+
+struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+ struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc = NULL;
+ struct tegra_dma_sg_req *sg_req = NULL;
+ unsigned long csr, ahb_seq, apb_ptr, apb_seq;
+ int len;
+ size_t remain_len;
+ dma_addr_t mem = buf_addr;
+ u32 burst_size;
+ enum dma_slave_buswidth slave_bw;
+ int ret;
+
+ if (!buf_len || !period_len) {
+ dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
+ return NULL;
+ }
+
+ /*
+ * More requests can be queued as long as the DMA has not been
+ * started; the driver will loop over all of them. Once the DMA
+ * is started, new requests can be queued only after terminating
+ * the DMA.
+ */
+ if (tdc->busy) {
+ dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+ return NULL;
+ }
+
+ /*
+ * We only support cyclic transfers when buf_len is a multiple of
+ * period_len.
+ */
+ if (buf_len % period_len) {
+ dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
+ return NULL;
+ }
+
+ len = period_len;
+ if ((len & 3) || (buf_addr & 3) ||
+ (len > tdc->tdma->chip_data->max_dma_count)) {
+ dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+
+ ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
+ TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
+
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
+ csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+ dma_desc = tegra_dma_desc_get(tdc);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "not enough descriptors available\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&dma_desc->tx_list);
+ INIT_LIST_HEAD(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+
+ dma_desc->bytes_transferred = 0;
+ dma_desc->bytes_requested = buf_len;
+ remain_len = buf_len;
+
+ /* Split transfer equal to period size */
+ while (remain_len) {
+ sg_req = tegra_dma_sg_req_get(tdc);
+ if (!sg_req) {
+ dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ sg_req->ch_regs.apb_ptr = apb_ptr;
+ sg_req->ch_regs.ahb_ptr = mem;
+ sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.apb_seq = apb_seq;
+ sg_req->ch_regs.ahb_seq = ahb_seq;
+ sg_req->configured = false;
+ sg_req->half_done = false;
+ sg_req->last_sg = false;
+ sg_req->dma_desc = dma_desc;
+ sg_req->req_len = len;
+
+ list_add_tail(&sg_req->node, &dma_desc->tx_list);
+ remain_len -= len;
+ mem += len;
+ }
+ sg_req->last_sg = true;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
+
+ /*
+ * Make sure the requested mode does not conflict with the
+ * currently configured mode.
+ */
+ if (!tdc->isr_handler) {
+ tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
+ tdc->cyclic = true;
+ } else {
+ if (!tdc->cyclic) {
+ dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+ }
+
+ return &dma_desc->txd;
+}
|
||
+
|
||
+static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
|
||
+{
|
||
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
|
||
+ struct tegra_dma *tdma = tdc->tdma;
|
||
+ int ret;
|
||
+
|
||
+ dma_cookie_init(&tdc->dma_chan);
|
||
+ tdc->config_init = false;
|
||
+ ret = clk_prepare_enable(tdma->dma_clk);
|
||
+ if (ret < 0)
|
||
+ dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static void tegra_dma_free_chan_resources(struct dma_chan *dc)
|
||
+{
|
||
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
|
||
+ struct tegra_dma *tdma = tdc->tdma;
|
||
+
|
||
+ struct tegra_dma_desc *dma_desc;
|
||
+ struct tegra_dma_sg_req *sg_req;
|
||
+ struct list_head dma_desc_list;
|
||
+ struct list_head sg_req_list;
|
||
+ unsigned long flags;
|
||
+
|
||
+ INIT_LIST_HEAD(&dma_desc_list);
|
||
+ INIT_LIST_HEAD(&sg_req_list);
|
||
+
|
||
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
|
||
+
|
||
+ if (tdc->busy)
|
||
+ tegra_dma_terminate_all(dc);
|
||
+
|
||
+ spin_lock_irqsave(&tdc->lock, flags);
|
||
+ list_splice_init(&tdc->pending_sg_req, &sg_req_list);
|
||
+ list_splice_init(&tdc->free_sg_req, &sg_req_list);
|
||
+ list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
|
||
+ INIT_LIST_HEAD(&tdc->cb_desc);
|
||
+ tdc->config_init = false;
|
||
+ tdc->isr_handler = NULL;
|
||
+ spin_unlock_irqrestore(&tdc->lock, flags);
|
||
+
|
||
+ while (!list_empty(&dma_desc_list)) {
|
||
+ dma_desc = list_first_entry(&dma_desc_list,
|
||
+ typeof(*dma_desc), node);
|
||
+ list_del(&dma_desc->node);
|
||
+ kfree(dma_desc);
|
||
+ }
|
||
+
|
||
+ while (!list_empty(&sg_req_list)) {
|
||
+ sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
|
||
+ list_del(&sg_req->node);
|
||
+ kfree(sg_req);
|
||
+ }
|
||
+ clk_disable_unprepare(tdma->dma_clk);
|
||
+}
|
||
+
|
||
+/* Tegra20 specific DMA controller information */
|
||
+static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
|
||
+ .nr_channels = 16,
|
||
+ .max_dma_count = 1024UL * 64,
|
||
+ .support_channel_pause = false,
|
||
+};
|
||
+
|
||
+/* Tegra30 specific DMA controller information */
|
||
+static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
|
||
+ .nr_channels = 32,
|
||
+ .max_dma_count = 1024UL * 64,
|
||
+ .support_channel_pause = false,
|
||
+};
|
||
+
|
||
+/* Tegra114 specific DMA controller information */
|
||
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
|
||
+ .nr_channels = 32,
|
||
+ .max_dma_count = 1024UL * 64,
|
||
+ .support_channel_pause = true,
|
||
+};
|
||
+
|
||
+
|
||
+static const struct of_device_id tegra_dma_of_match[] = {
|
||
+ {
|
||
+ .compatible = "nvidia,tegra114-apbdma",
|
||
+ .data = &tegra114_dma_chip_data,
|
||
+ }, {
|
||
+ .compatible = "nvidia,tegra30-apbdma",
|
||
+ .data = &tegra30_dma_chip_data,
|
||
+ }, {
|
||
+ .compatible = "nvidia,tegra20-apbdma",
|
||
+ .data = &tegra20_dma_chip_data,
|
||
+ }, {
|
||
+ },
|
||
+};
|
||
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
|
||
+
|
||
+static int tegra_dma_probe(struct platform_device *pdev)
|
||
+{
|
||
+ struct resource *res;
|
||
+ struct tegra_dma *tdma;
|
||
+ int ret;
|
||
+ int i;
|
||
+ const struct tegra_dma_chip_data *cdata = NULL;
|
||
+ const struct of_device_id *match;
|
||
+
|
||
+ match = of_match_device(tegra_dma_of_match, &pdev->dev);
|
||
+ if (!match) {
|
||
+ dev_err(&pdev->dev, "Error: No device match found\n");
|
||
+ return -ENODEV;
|
||
+ }
|
||
+ cdata = match->data;
|
||
+
|
||
+ tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
|
||
+ sizeof(struct tegra_dma_channel), GFP_KERNEL);
|
||
+ if (!tdma) {
|
||
+ dev_err(&pdev->dev, "Error: memory allocation failed\n");
|
||
+ return -ENOMEM;
|
||
+ }
|
||
+
|
||
+ tdma->dev = &pdev->dev;
|
||
+ tdma->chip_data = cdata;
|
||
+ platform_set_drvdata(pdev, tdma);
|
||
+
|
||
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
|
||
+ if (IS_ERR(tdma->base_addr))
|
||
+ return PTR_ERR(tdma->base_addr);
|
||
+
|
||
+ tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
|
||
+ if (IS_ERR(tdma->dma_clk)) {
|
||
+ dev_err(&pdev->dev, "Error: Missing controller clock\n");
|
||
+ return PTR_ERR(tdma->dma_clk);
|
||
+ }
|
||
+
|
||
+ spin_lock_init(&tdma->global_lock);
|
||
+
|
||
+ pm_runtime_enable(&pdev->dev);
|
||
+ if (!pm_runtime_enabled(&pdev->dev)) {
|
||
+ ret = tegra_dma_runtime_resume(&pdev->dev);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
|
||
+ ret);
|
||
+ goto err_pm_disable;
|
||
+ }
|
||
+ }
|
||
+
|
||
+ /* Enable clock before accessing registers */
|
||
+ ret = clk_prepare_enable(tdma->dma_clk);
|
||
+ if (ret < 0) {
|
||
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
|
||
+ goto err_pm_disable;
|
||
+ }
|
||
+
|
||
+ /* Reset DMA controller */
|
||
+ tegra_periph_reset_assert(tdma->dma_clk);
|
||
+ udelay(2);
|
||
+ tegra_periph_reset_deassert(tdma->dma_clk);
|
||
+
|
||
+ /* Enable global DMA registers */
|
||
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
|
||
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
|
||
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
|
||
+
|
||
+ clk_disable_unprepare(tdma->dma_clk);
|
||
+
|
||
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
|
||
+ for (i = 0; i < cdata->nr_channels; i++) {
|
||
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
|
||
+
|
||
+ tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
|
||
+ i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
|
||
+
|
||
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
|
||
+ if (!res) {
|
||
+ ret = -EINVAL;
|
||
+ dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
|
||
+ goto err_irq;
|
||
+ }
|
||
+ tdc->irq = res->start;
|
||
+ snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
|
||
+ ret = devm_request_irq(&pdev->dev, tdc->irq,
|
||
+ tegra_dma_isr, 0, tdc->name, tdc);
|
||
+ if (ret) {
|
||
+ dev_err(&pdev->dev,
|
||
+ "request_irq failed with err %d channel %d\n",
|
||
+ ret, i);
|
||
+ goto err_irq;
|
||
+ }
|
||
+
|
||
+ tdc->dma_chan.device = &tdma->dma_dev;
|
||
+ dma_cookie_init(&tdc->dma_chan);
|
||
+ list_add_tail(&tdc->dma_chan.device_node,
|
||
+ &tdma->dma_dev.channels);
|
||
+ tdc->tdma = tdma;
|
||
+ tdc->id = i;
|
||
+
|
||
+ tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
|
||
+ (unsigned long)tdc);
|
||
+ spin_lock_init(&tdc->lock);
|
||
+
|
||
+ INIT_LIST_HEAD(&tdc->pending_sg_req);
|
||
+ INIT_LIST_HEAD(&tdc->free_sg_req);
|
||
+ INIT_LIST_HEAD(&tdc->free_dma_desc);
|
||
+ INIT_LIST_HEAD(&tdc->cb_desc);
|
||
+ }
|
||
+
|
||
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
|
||
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
|
||
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
|
||
+
|
||
+ tdma->dma_dev.dev = &pdev->dev;
|
||
+ tdma->dma_dev.device_alloc_chan_resources =
|
||
+ tegra_dma_alloc_chan_resources;
|
||
+ tdma->dma_dev.device_free_chan_resources =
|
||
+ tegra_dma_free_chan_resources;
|
||
+ tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
|
||
+ tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
|
||
+ tdma->dma_dev.device_control = tegra_dma_device_control;
|
||
+ tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
|
||
+ tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
|
||
+
|
||
+ ret = dma_async_device_register(&tdma->dma_dev);
|
||
+ if (ret < 0) {
|
||
+ dev_err(&pdev->dev,
|
||
+ "Tegra20 APB DMA driver registration failed %d\n", ret);
|
||
+ goto err_irq;
|
||
+ }
|
||
+
|
||
+ dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
|
||
+ cdata->nr_channels);
|
||
+ return 0;
|
||
+
|
||
+err_irq:
|
||
+ while (--i >= 0) {
|
||
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
|
||
+ tasklet_kill(&tdc->tasklet);
|
||
+ }
|
||
+
|
||
+err_pm_disable:
|
||
+ pm_runtime_disable(&pdev->dev);
|
||
+ if (!pm_runtime_status_suspended(&pdev->dev))
|
||
+ tegra_dma_runtime_suspend(&pdev->dev);
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+static int tegra_dma_remove(struct platform_device *pdev)
|
||
+{
|
||
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
|
||
+ int i;
|
||
+ struct tegra_dma_channel *tdc;
|
||
+
|
||
+ dma_async_device_unregister(&tdma->dma_dev);
|
||
+
|
||
+ for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
|
||
+ tdc = &tdma->channels[i];
|
||
+ tasklet_kill(&tdc->tasklet);
|
||
+ }
|
||
+
|
||
+ pm_runtime_disable(&pdev->dev);
|
||
+ if (!pm_runtime_status_suspended(&pdev->dev))
|
||
+ tegra_dma_runtime_suspend(&pdev->dev);
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int tegra_dma_runtime_suspend(struct device *dev)
|
||
+{
|
||
+ struct platform_device *pdev = to_platform_device(dev);
|
||
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
|
||
+
|
||
+ clk_disable_unprepare(tdma->dma_clk);
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int tegra_dma_runtime_resume(struct device *dev)
|
||
+{
|
||
+ struct platform_device *pdev = to_platform_device(dev);
|
||
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
|
||
+ int ret;
|
||
+
|
||
+ ret = clk_prepare_enable(tdma->dma_clk);
|
||
+ if (ret < 0) {
|
||
+ dev_err(dev, "clk_enable failed: %d\n", ret);
|
||
+ return ret;
|
||
+ }
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+#ifdef CONFIG_PM_SLEEP
|
||
+static int tegra_dma_pm_suspend(struct device *dev)
|
||
+{
|
||
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
|
||
+ int i;
|
||
+ int ret;
|
||
+
|
||
+ /* Enable clock before accessing register */
|
||
+ ret = tegra_dma_runtime_resume(dev);
|
||
+ if (ret < 0)
|
||
+ return ret;
|
||
+
|
||
+ tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
|
||
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
|
||
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
|
||
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
|
||
+
|
||
+ ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
|
||
+ ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
|
||
+ ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
|
||
+ ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
|
||
+ ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
|
||
+ }
|
||
+
|
||
+ /* Disable clock */
|
||
+ tegra_dma_runtime_suspend(dev);
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int tegra_dma_pm_resume(struct device *dev)
|
||
+{
|
||
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
|
||
+ int i;
|
||
+ int ret;
|
||
+
|
||
+ /* Enable clock before accessing register */
|
||
+ ret = tegra_dma_runtime_resume(dev);
|
||
+ if (ret < 0)
|
||
+ return ret;
|
||
+
|
||
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
|
||
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
|
||
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
|
||
+
|
||
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
|
||
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
|
||
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
|
||
+
|
||
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
|
||
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
|
||
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
|
||
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
|
||
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
|
||
+ (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
|
||
+ }
|
||
+
|
||
+ /* Disable clock */
|
||
+ tegra_dma_runtime_suspend(dev);
|
||
+ return 0;
|
||
+}
|
||
+#endif
|
||
+
|
||
+static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
|
||
+#ifdef CONFIG_PM_RUNTIME
|
||
+ .runtime_suspend = tegra_dma_runtime_suspend,
|
||
+ .runtime_resume = tegra_dma_runtime_resume,
|
||
+#endif
|
||
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
|
||
+};
|
||
+
|
||
+static struct platform_driver tegra_dmac_driver = {
|
||
+ .driver = {
|
||
+ .name = "tegra-apbdma",
|
||
+ .owner = THIS_MODULE,
|
||
+ .pm = &tegra_dma_dev_pm_ops,
|
||
+ .of_match_table = tegra_dma_of_match,
|
||
+ },
|
||
+ .probe = tegra_dma_probe,
|
||
+ .remove = tegra_dma_remove,
|
||
+};
|
||
+
|
||
+module_platform_driver(tegra_dmac_driver);
|
||
+
|
||
+MODULE_ALIAS("platform:tegra20-apbdma");
|
||
+MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
|
||
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
|
||
+MODULE_LICENSE("GPL v2");
|
||
diff -urN linux-3.0.101/drivers/dma/virt-dma.c linux-3.0.101.xm510/drivers/dma/virt-dma.c
|
||
--- linux-3.0.101/drivers/dma/virt-dma.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/virt-dma.c 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,123 @@
|
||
+/*
|
||
+ * Virtual DMA channel support for DMAengine
|
||
+ *
|
||
+ * Copyright (C) 2012 Russell King
|
||
+ *
|
||
+ * This program is free software; you can redistribute it and/or modify
|
||
+ * it under the terms of the GNU General Public License version 2 as
|
||
+ * published by the Free Software Foundation.
|
||
+ */
|
||
+#include <linux/device.h>
|
||
+#include <linux/dmaengine.h>
|
||
+#include <linux/module.h>
|
||
+#include <linux/spinlock.h>
|
||
+
|
||
+#include "virt-dma.h"
|
||
+
|
||
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
|
||
+{
|
||
+ return container_of(tx, struct virt_dma_desc, tx);
|
||
+}
|
||
+
|
||
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
|
||
+{
|
||
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
|
||
+ struct virt_dma_desc *vd = to_virt_desc(tx);
|
||
+ unsigned long flags;
|
||
+ dma_cookie_t cookie;
|
||
+
|
||
+ spin_lock_irqsave(&vc->lock, flags);
|
||
+ cookie = dma_cookie_assign(tx);
|
||
+
|
||
+ list_add_tail(&vd->node, &vc->desc_submitted);
|
||
+ spin_unlock_irqrestore(&vc->lock, flags);
|
||
+
|
||
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
|
||
+ vc, vd, cookie);
|
||
+
|
||
+ return cookie;
|
||
+}
|
||
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
|
||
+
|
||
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
|
||
+ dma_cookie_t cookie)
|
||
+{
|
||
+ struct virt_dma_desc *vd;
|
||
+
|
||
+ list_for_each_entry(vd, &vc->desc_issued, node)
|
||
+ if (vd->tx.cookie == cookie)
|
||
+ return vd;
|
||
+
|
||
+ return NULL;
|
||
+}
|
||
+EXPORT_SYMBOL_GPL(vchan_find_desc);
|
||
+
|
||
+/*
|
||
+ * This tasklet handles the completion of a DMA descriptor by
|
||
+ * calling its callback and freeing it.
|
||
+ */
|
||
+static void vchan_complete(unsigned long arg)
|
||
+{
|
||
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
|
||
+ struct virt_dma_desc *vd;
|
||
+ dma_async_tx_callback cb = NULL;
|
||
+ void *cb_data = NULL;
|
||
+ LIST_HEAD(head);
|
||
+
|
||
+ spin_lock_irq(&vc->lock);
|
||
+ list_splice_tail_init(&vc->desc_completed, &head);
|
||
+ vd = vc->cyclic;
|
||
+ if (vd) {
|
||
+ vc->cyclic = NULL;
|
||
+ cb = vd->tx.callback;
|
||
+ cb_data = vd->tx.callback_param;
|
||
+ }
|
||
+ spin_unlock_irq(&vc->lock);
|
||
+
|
||
+ if (cb)
|
||
+ cb(cb_data);
|
||
+
|
||
+ while (!list_empty(&head)) {
|
||
+ vd = list_first_entry(&head, struct virt_dma_desc, node);
|
||
+ cb = vd->tx.callback;
|
||
+ cb_data = vd->tx.callback_param;
|
||
+
|
||
+ list_del(&vd->node);
|
||
+
|
||
+ vc->desc_free(vd);
|
||
+
|
||
+ if (cb)
|
||
+ cb(cb_data);
|
||
+ }
|
||
+}
|
||
+
|
||
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
|
||
+{
|
||
+ while (!list_empty(head)) {
|
||
+ struct virt_dma_desc *vd = list_first_entry(head,
|
||
+ struct virt_dma_desc, node);
|
||
+ list_del(&vd->node);
|
||
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
|
||
+ vc->desc_free(vd);
|
||
+ }
|
||
+}
|
||
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
|
||
+
|
||
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
|
||
+{
|
||
+ dma_cookie_init(&vc->chan);
|
||
+
|
||
+ spin_lock_init(&vc->lock);
|
||
+ INIT_LIST_HEAD(&vc->desc_submitted);
|
||
+ INIT_LIST_HEAD(&vc->desc_issued);
|
||
+ INIT_LIST_HEAD(&vc->desc_completed);
|
||
+
|
||
+ tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
|
||
+
|
||
+ vc->chan.device = dmadev;
|
||
+ list_add_tail(&vc->chan.device_node, &dmadev->channels);
|
||
+}
|
||
+EXPORT_SYMBOL_GPL(vchan_init);
|
||
+
|
||
+MODULE_AUTHOR("Russell King");
|
||
+MODULE_LICENSE("GPL");
|
||
diff -urN linux-3.0.101/drivers/dma/virt-dma.h linux-3.0.101.xm510/drivers/dma/virt-dma.h
|
||
--- linux-3.0.101/drivers/dma/virt-dma.h 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/dma/virt-dma.h 2016-05-17 09:52:17.000000000 +0300
|
||
@@ -0,0 +1,152 @@
|
||
+/*
|
||
+ * Virtual DMA channel support for DMAengine
|
||
+ *
|
||
+ * Copyright (C) 2012 Russell King
|
||
+ *
|
||
+ * This program is free software; you can redistribute it and/or modify
|
||
+ * it under the terms of the GNU General Public License version 2 as
|
||
+ * published by the Free Software Foundation.
|
||
+ */
|
||
+#ifndef VIRT_DMA_H
|
||
+#define VIRT_DMA_H
|
||
+
|
||
+#include <linux/dmaengine.h>
|
||
+#include <linux/interrupt.h>
|
||
+
|
||
+#include "dmaengine.h"
|
||
+
|
||
+struct virt_dma_desc {
|
||
+ struct dma_async_tx_descriptor tx;
|
||
+ /* protected by vc.lock */
|
||
+ struct list_head node;
|
||
+};
|
||
+
|
||
+struct virt_dma_chan {
|
||
+ struct dma_chan chan;
|
||
+ struct tasklet_struct task;
|
||
+ void (*desc_free)(struct virt_dma_desc *);
|
||
+
|
||
+ spinlock_t lock;
|
||
+
|
||
+ /* protected by vc.lock */
|
||
+ struct list_head desc_submitted;
|
||
+ struct list_head desc_issued;
|
||
+ struct list_head desc_completed;
|
||
+
|
||
+ struct virt_dma_desc *cyclic;
|
||
+};
|
||
+
|
||
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
|
||
+{
|
||
+ return container_of(chan, struct virt_dma_chan, chan);
|
||
+}
|
||
+
|
||
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
|
||
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
|
||
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
|
||
+
|
||
+/**
|
||
+ * vchan_tx_prep - prepare a descriptor
|
||
+ * vc: virtual channel allocating this descriptor
|
||
+ * vd: virtual descriptor to prepare
|
||
+ * tx_flags: flags argument passed in to prepare function
|
||
+ */
|
||
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
|
||
+ struct virt_dma_desc *vd, unsigned long tx_flags)
|
||
+{
|
||
+ extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
|
||
+
|
||
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
|
||
+ vd->tx.flags = tx_flags;
|
||
+ vd->tx.tx_submit = vchan_tx_submit;
|
||
+
|
||
+ return &vd->tx;
|
||
+}
|
||
+
|
||
+/**
|
||
+ * vchan_issue_pending - move submitted descriptors to issued list
|
||
+ * vc: virtual channel to update
|
||
+ *
|
||
+ * vc.lock must be held by caller
|
||
+ */
|
||
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
|
||
+{
|
||
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
|
||
+ return !list_empty(&vc->desc_issued);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * vchan_cookie_complete - report completion of a descriptor
|
||
+ * vd: virtual descriptor to update
|
||
+ *
|
||
+ * vc.lock must be held by caller
|
||
+ */
|
||
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
|
||
+{
|
||
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
|
||
+
|
||
+ dma_cookie_complete(&vd->tx);
|
||
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
|
||
+ vd, vd->tx.cookie);
|
||
+ list_add_tail(&vd->node, &vc->desc_completed);
|
||
+
|
||
+ tasklet_schedule(&vc->task);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * vchan_cyclic_callback - report the completion of a period
|
||
+ * vd: virtual descriptor
|
||
+ */
|
||
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
|
||
+{
|
||
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
|
||
+
|
||
+ vc->cyclic = vd;
|
||
+ tasklet_schedule(&vc->task);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * vchan_next_desc - peek at the next descriptor to be processed
|
||
+ * vc: virtual channel to obtain descriptor from
|
||
+ *
|
||
+ * vc.lock must be held by caller
|
||
+ */
|
||
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
|
||
+{
|
||
+ if (list_empty(&vc->desc_issued))
|
||
+ return NULL;
|
||
+
|
||
+ return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
|
||
+ * vc: virtual channel to get descriptors from
|
||
+ * head: list of descriptors found
|
||
+ *
|
||
+ * vc.lock must be held by caller
|
||
+ *
|
||
+ * Removes all submitted and issued descriptors from internal lists, and
|
||
+ * provides a list of all descriptors found
|
||
+ */
|
||
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
|
||
+ struct list_head *head)
|
||
+{
|
||
+ list_splice_tail_init(&vc->desc_submitted, head);
|
||
+ list_splice_tail_init(&vc->desc_issued, head);
|
||
+ list_splice_tail_init(&vc->desc_completed, head);
|
||
+}
|
||
+
|
||
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
|
||
+{
|
||
+ unsigned long flags;
|
||
+ LIST_HEAD(head);
|
||
+
|
||
+ spin_lock_irqsave(&vc->lock, flags);
|
||
+ vchan_get_all_descriptors(vc, &head);
|
||
+ spin_unlock_irqrestore(&vc->lock, flags);
|
||
+
|
||
+ vchan_dma_desc_free_list(vc, &head);
|
||
+}
|
||
+
|
||
+#endif
|
||
diff -urN linux-3.0.101/drivers/mmc/core/sdio.c linux-3.0.101.xm510/drivers/mmc/core/sdio.c
|
||
--- linux-3.0.101/drivers/mmc/core/sdio.c 2013-10-22 10:58:59.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/mmc/core/sdio.c 2017-09-11 14:47:37.000000000 +0300
|
||
@@ -111,11 +111,11 @@
|
||
|
||
cccr_vsn = data & 0x0f;
|
||
|
||
- if (cccr_vsn > SDIO_CCCR_REV_1_20) {
|
||
- printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
|
||
- mmc_hostname(card->host), cccr_vsn);
|
||
- return -EINVAL;
|
||
- }
|
||
+ //if (cccr_vsn > SDIO_CCCR_REV_1_20) {
|
||
+ //printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
|
||
+ //mmc_hostname(card->host), cccr_vsn);
|
||
+ //return -EINVAL;
|
||
+ //}
|
||
|
||
card->cccr.sdio_vsn = (data & 0xf0) >> 4;
|
||
|
||
diff -urN linux-3.0.101/drivers/mmc/host/arasan.c linux-3.0.101.xm510/drivers/mmc/host/arasan.c
|
||
--- linux-3.0.101/drivers/mmc/host/arasan.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/mmc/host/arasan.c 2017-09-11 14:47:37.000000000 +0300
|
||
@@ -0,0 +1,1765 @@
|
||
+/*
|
||
+ * Arasan MMC/SD/SDIO driver
|
||
+ *
|
||
+ * This is the driver for the Arasan MMC/SD/SDIO host controller
|
||
+ * integrated in the STMicroelectronics platforms
|
||
+ *
|
||
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@xxxxxx>
|
||
+ * Copyright (C) 2010 STMicroelectronics Ltd
|
||
+ *
|
||
+ * This program is free software; you can redistribute it and/or modify
|
||
+ * it under the terms of the GNU General Public License version 2 as
|
||
+ * published by the Free Software Foundation.
|
||
+ */
|
||
+
|
||
+#include <linux/module.h>
|
||
+#include <linux/init.h>
|
||
+#include <linux/io.h>
|
||
+#include <linux/platform_device.h>
|
||
+#include <linux/mbus.h>
|
||
+#include <linux/delay.h>
|
||
+#include <linux/interrupt.h>
|
||
+#include <linux/slab.h>
|
||
+#include <linux/dma-mapping.h>
|
||
+#include <linux/scatterlist.h>
|
||
+#include <linux/irq.h>
|
||
+#include <linux/highmem.h>
|
||
+#include <linux/sched.h>
|
||
+#include <linux/mmc/host.h>
|
||
+#include <linux/mmc/arasan_plat.h>
|
||
+
|
||
+#include <asm/sizes.h>
|
||
+#include <asm/unaligned.h>
|
||
+
|
||
+#include <mach/hardware.h>
|
||
+
|
||
+#include "arasan.h"
|
||
+
|
||
+/* To enable more debug information. */
|
||
+#undef ARASAN_DEBUG
|
||
+//#define ARASAN_DEBUG
|
||
+//#define DEBUG
|
||
+
|
||
+#ifdef ARASAN_DEBUG
|
||
+#define DBG(fmt, args...) pr_info(fmt, ## args)
|
||
+#else
|
||
+#define DBG(fmt, args...) do { } while (0)
|
||
+#endif
|
||
+
|
||
+static int maxfreq = ARASAN_CLOCKRATE_MAX;
|
||
+//module_param(maxfreq, int, S_IRUGO);
|
||
+//MODULE_PARM_DESC(maxfreq, "Maximum card clock frequency (default 25MHz)");
|
||
+
|
||
+static unsigned int adma = 1;
|
||
+//module_param(adma, int, S_IRUGO);
|
||
+//MODULE_PARM_DESC(adma, "Disable/Enable the Advanced DMA mode");
|
||
+
|
||
+static unsigned int led;
|
||
+//module_param(led, int, S_IRUGO | S_IWUSR);
|
||
+//MODULE_PARM_DESC(led, "Enable|Disable LED");
|
||
+
|
||
+//static unsigned int pio;
|
||
+//module_param(pio, int, S_IRUGO);
|
||
+//MODULE_PARM_DESC(pio, "PIO mode (no DMA)");
|
||
+
|
||
+struct arasan_cap {
|
||
+ unsigned int timer_freq;
|
||
+ unsigned int timer_unit;
|
||
+ unsigned int base_clk_sd;
|
||
+ unsigned int max_blk_len;
|
||
+ unsigned int adma2;
|
||
+ unsigned int high_speed;
|
||
+ unsigned int sdma;
|
||
+ unsigned int suspend;
|
||
+ unsigned int voltage33;
|
||
+ unsigned int voltage30;
|
||
+ unsigned int voltage18;
|
||
+ unsigned int int_mode;
|
||
+ unsigned int spi;
|
||
+ unsigned int spi_block;
|
||
+};
|
||
+
|
||
+struct arasan_host {
|
||
+ void __iomem *base;
|
||
+ struct mmc_request *mrq;
|
||
+ unsigned int intr_en;
|
||
+ u8 ctrl;
|
||
+ unsigned int sg_frags;
|
||
+ struct scatterlist **sg;
|
||
+ struct timer_list timer;
|
||
+ struct mmc_host *mmc;
|
||
+ struct device *dev;
|
||
+ struct resource *res;
|
||
+ int irq;
|
||
+ struct arasan_cap cap;
|
||
+ u8 vdd;
|
||
+ unsigned int freq;
|
||
+ unsigned int status;
|
||
+ unsigned int adma;
|
||
+ unsigned int use_pio;
|
||
+ u16 pio_blksz;
|
||
+ u32 pio_blocks;
|
||
+ u32 *pio_blkbuf;
|
||
+ spinlock_t lock;
|
||
+ struct tasklet_struct card_tasklet;
|
||
+ u8 *adma_desc;
|
||
+ dma_addr_t adma_addr;
|
||
+
|
||
+ unsigned int need_poll;
|
||
+ unsigned int need_detect;
|
||
+ unsigned int card_irq;
|
||
+ unsigned int auto_cmd12;
|
||
+ unsigned int sdio_4bit_data;
|
||
+};
|
||
+
|
||
+static inline void arsan_sw_reset(struct arasan_host *host, unsigned int flag)
|
||
+{
|
||
+ /* After completing the reset, wait the HC clears these bits */
|
||
+ if (likely(flag == reset_all)) {
|
||
+ writeb(ARSAN_RESET_ALL, host->base + ARASAN_SW_RESET);
|
||
+ do { } while ((readb(host->base + ARASAN_SW_RESET)) &
|
||
+ ARSAN_RESET_ALL);
|
||
+ } else if (flag == reset_cmd_line) {
|
||
+ writeb(ARSAN_RESET_CMD_LINE, host->base + ARASAN_SW_RESET);
|
||
+ do { } while ((readb(host->base + ARASAN_SW_RESET)) &
|
||
+ ARSAN_RESET_CMD_LINE);
|
||
+
|
||
+ } else if (flag == reset_dat_line) {
|
||
+ writeb(ARSAN_RESET_DAT_LINE, host->base + ARASAN_SW_RESET);
|
||
+ do { } while ((readb(host->base + ARASAN_SW_RESET)) &
|
||
+ ARSAN_RESET_DAT_LINE);
|
||
+ }
|
||
+}
|
||
+
|
||
+static inline void arsan_hc_version(struct arasan_host *host)
|
||
+{
|
||
+ u16 version;
|
||
+
|
||
+ version = readw(host->base + ARASAN_HOST_VERSION);
|
||
+ DBG("Arasan MMC/SDIO:\n\tHC Vendor Version Number: %d\n",
|
||
+ (version >> 8));
|
||
+ DBG("\tHC SPEC Version Number: %d\n", (version & 0x00ff));
|
||
+}
|
||
+
|
||
+static void arasan_capabilities(struct arasan_host *host)
|
||
+{
|
||
+ unsigned int cap;
|
||
+ unsigned int max_blk_len;
|
||
+
|
||
+ cap = readl(host->base + ARASAN_CAPABILITIES);
|
||
+
|
||
+ DBG("\tArasan capabilities: 0x%x\n", cap);
|
||
+
|
||
+ host->cap.timer_freq = cap & 0x3f;
|
||
+ host->cap.timer_unit = (cap >> 7) & 0x1;
|
||
+
|
||
+ DBG("\tTimeout Clock Freq: %d %s\n", host->cap.timer_freq,
|
||
+ host->cap.timer_unit ? "MHz" : "KHz");
|
||
+
|
||
+ host->cap.base_clk_sd = (cap >> 8) & 0x3f;
|
||
+ DBG("\tBase Clock Freq for SD: %d MHz\n", host->cap.base_clk_sd);
|
||
+
|
||
+ max_blk_len = (cap >> 16) & 0x3;
|
||
+ switch (max_blk_len) {
|
||
+ case 0:
|
||
+ host->cap.max_blk_len = 512;
|
||
+ break;
|
||
+ case 1:
|
||
+ host->cap.max_blk_len = 1024;
|
||
+ break;
|
||
+ case 2:
|
||
+ host->cap.max_blk_len = 2048;
|
||
+ break;
|
||
+ case 3:
|
||
+ host->cap.max_blk_len = 4096;
|
||
+ break;
|
||
+ default:
|
||
+ break;
|
||
+ }
|
||
+ DBG("\tMax Block size: %d bytes\n", host->cap.max_blk_len);
|
||
+ //printk("\tMax Block size: %d bytes\n", host->cap.max_blk_len);
|
||
+
|
||
+ host->cap.adma2 = (cap >> 19) & 0x1;
|
||
+ host->cap.high_speed = (cap >> 21) & 0x1;
|
||
+ host->cap.sdma = (cap >> 22) & 0x1;
|
||
+
|
||
+ DBG("\tadma2 %s, high speed %s, sdma %s\n",
|
||
+ host->cap.adma2 ? "Yes" : "Not",
|
||
+ host->cap.high_speed ? "Yes" : "Not",
|
||
+ host->cap.sdma ? "Yes" : "Not");
|
||
+
|
||
+ host->cap.suspend = (cap >> 23) & 0x1;
|
||
+ DBG("\tsuspend/resume %s suported\n",
|
||
+ host->cap.adma2 ? "is" : "Not");
|
||
+
|
||
+ /* Disable adma user option if cap not supported. */
|
||
+ if (!host->cap.adma2)
|
||
+ adma = 0;
|
||
+
|
||
+ host->cap.voltage33 = (cap >> 24) & 0x1;
|
||
+ host->cap.voltage30 = (cap >> 25) & 0x1;
|
||
+ host->cap.voltage18 = (cap >> 26) & 0x1;
|
||
+ host->cap.int_mode = (cap >> 27) & 0x1;
|
||
+ host->cap.spi = (cap >> 29) & 0x1; /* 是否支持spi模式 */
|
||
+ host->cap.spi_block = (cap >> 30) & 0x1;
|
||
+
|
||
+ if (host->cap.voltage33)
|
||
+ DBG("\t3.3V voltage suported\n");
|
||
+ if (host->cap.voltage30)
|
||
+ DBG("\t3.0V voltage suported\n");
|
||
+ if (host->cap.voltage18)
|
||
+ DBG("\t1.8V voltage suported\n");
|
||
+
|
||
+ if (host->cap.int_mode)
|
||
+ DBG("\tInterrupt Mode supported\n");
|
||
+ if (host->cap.spi)
|
||
+ DBG("\tSPI Mode supported\n");
|
||
+ if (host->cap.spi_block)
|
||
+ DBG("\tSPI Block Mode supported\n");
|
||
+}
|
||
+
|
||
+static void arasan_ctrl_led(struct arasan_host *host, unsigned int flag)
|
||
+{
|
||
+ //printk(KERN_EMERG"arasan_ctrl_led.\n");
|
||
+ if (led) {
|
||
+ u8 ctrl_reg = readb(host->base + ARASAN_HOST_CTRL);
|
||
+
|
||
+ //printk(KERN_EMERG"flag:%d\n", flag);
|
||
+ if (flag)
|
||
+ ctrl_reg |= ARASAN_HOST_CTRL_LED;
|
||
+ else
|
||
+ ctrl_reg &= ~ARASAN_HOST_CTRL_LED;
|
||
+
|
||
+ host->ctrl = ctrl_reg;
|
||
+ writeb(host->ctrl, host->base + ARASAN_HOST_CTRL);
|
||
+ }
|
||
+}
|
||
+
|
||
+static inline void arasan_set_interrupts(struct arasan_host *host)
|
||
+{
|
||
+ host->intr_en = ARASAN_IRQ_DEFAULT_MASK;
|
||
+ writel(host->intr_en, host->base + ARASAN_NORMAL_INT_STATUS_EN);
|
||
+ writel(host->intr_en, host->base + ARASAN_NORMAL_INT_SIGN_EN);
|
||
+
|
||
+}
|
||
+
|
||
+static inline void arasan_clear_interrupts(struct arasan_host *host)
|
||
+{
|
||
+ writel(0, host->base + ARASAN_NORMAL_INT_STATUS_EN);
|
||
+ writel(0, host->base + ARASAN_ERR_INT_STATUS_EN);
|
||
+ writel(0, host->base + ARASAN_NORMAL_INT_SIGN_EN);
|
||
+}
|
||
+
|
||
+static void arasan_power_set(struct arasan_host *host, unsigned int pwr, u8 vdd)
|
||
+{
|
||
+ u8 pwr_reg;
|
||
+
|
||
+ pwr_reg = readb(host->base + ARASAN_PWR_CTRL);
|
||
+
|
||
+ host->vdd = (1 << vdd);
|
||
+
|
||
+ if (pwr) {
|
||
+ pwr_reg &= 0xf1;
|
||
+
|
||
+ if ((host->vdd & MMC_VDD_165_195) && host->cap.voltage18)
|
||
+ pwr_reg |= ARASAN_PWR_BUS_VOLTAGE_18;
|
||
+ else if ((host->vdd & MMC_VDD_29_30) && host->cap.voltage30)
|
||
+ pwr_reg |= ARASAN_PWR_BUS_VOLTAGE_30;
|
||
+ else if ((host->vdd & MMC_VDD_32_33) && host->cap.voltage33)
|
||
+ pwr_reg |= ARASAN_PWR_BUS_VOLTAGE_33;
|
||
+
|
||
+ //pwr_reg |= ARASAN_PWR_CTRL_UP;
|
||
+ /* 注意:电路设计和外围电路设计相反 */
|
||
+ pwr_reg &= ~ARASAN_PWR_CTRL_UP;
|
||
+ } else
|
||
+ //pwr_reg &= ~ARASAN_PWR_CTRL_UP;
|
||
+ pwr_reg |= ARASAN_PWR_CTRL_UP;
|
||
+
|
||
+ DBG("%s: pwr_reg 0x%x, host->vdd = 0x%x\n", __func__, pwr_reg,
|
||
+ host->vdd);
|
||
+ //printk(KERN_EMERG"pwr_reg:%2x\n", pwr_reg);
|
||
+ writeb(pwr_reg, host->base + ARASAN_PWR_CTRL);
|
||
+}
|
||
+
|
||
+static int arasan_test_card(struct arasan_host *host)
|
||
+{
|
||
+ unsigned int ret = 0;
|
||
+ u32 present = readl(host->base + ARASAN_PRESENT_STATE);
|
||
+
|
||
+ if (!host->need_detect)
|
||
+ goto out;
|
||
+ if (likely(!(present & ARASAN_PRESENT_STATE_CARD_PRESENT))) {
|
||
+ ret = -1;
|
||
+ }
|
||
+
|
||
+out:
|
||
+#ifdef ARASAN_DEBUG
|
||
+ if (present & ARASAN_PRESENT_STATE_CARD_STABLE)
|
||
+ pr_info("\tcard stable...");
|
||
+ if (!(present & ARASAN_PRESENT_STATE_WR_EN))
|
||
+ pr_info("\tcard Write protected...");
|
||
+ if (present & ARASAN_PRESENT_STATE_BUFFER_RD_EN)
|
||
+ pr_info("\tPIO Read Enable...");
|
||
+ if (present & ARASAN_PRESENT_STATE_BUFFER_WR_EN)
|
||
+ pr_info("\tPIO Write Enable...");
|
||
+ if (present & ARASAN_PRESENT_STATE_RD_ACTIVE)
|
||
+ pr_info("\tRead Xfer data...");
|
||
+ if (present & ARASAN_PRESENT_STATE_WR_ACTIVE)
|
||
+ pr_info("\tWrite Xfer data...");
|
||
+ if (present & ARASAN_PRESENT_STATE_DAT_ACTIVE)
|
||
+ pr_info("\tDAT line active...");
|
||
+#endif
|
||
+ return ret;
|
||
+}
|
||
+static void arasan_set_clock(struct arasan_host *host, unsigned int freq)
|
||
+{
|
||
+ u16 clock = 0;
|
||
+ unsigned long flags;
|
||
+
|
||
+ /* 协商阶段400K-低速模式 */
|
||
+ spin_lock_irqsave(&host->lock, flags);
|
||
+
|
||
+ if ((host->freq != freq) && (freq)) {
|
||
+ u16 divisor;
|
||
+
|
||
+ /* Ensure clock is off before making any changes */
|
||
+ //writew(clock, host->base + ARASAN_CLOCK_CTRL);
|
||
+
|
||
+ /* core checks if this is a good freq < max_freq */
|
||
+ host->freq = freq;
|
||
+
|
||
+ printk("%s:\n\tnew freq %d", __func__, host->freq);
|
||
+
|
||
+ /* Work out divisor for specified clock frequency */
|
||
+ for (divisor = 1; divisor <= 256; divisor *= 2)
|
||
+ /* Find first divisor producing a frequency less
|
||
+ * than or equal to MHz */
|
||
+ if ((maxfreq / divisor) <= freq)
|
||
+ break;
|
||
+
|
||
+ DBG("\tdivisor %d", divisor);
|
||
+ // printk("\tdivisor %d\n", divisor);
|
||
+ /* Set the clock divisor and enable the internal clock */
|
||
+ clock = divisor << (ARASAN_CLOCK_CTRL_SDCLK_SHIFT);
|
||
+ //clock = 0 << (ARASAN_CLOCK_CTRL_SDCLK_SHIFT);
|
||
+ clock &= ARASAN_CLOCK_CTRL_SDCLK_MASK;
|
||
+ clock |= ARASAN_CLOCK_CTRL_ICLK_ENABLE;
|
||
+ writew(clock, host->base + ARASAN_CLOCK_CTRL);
|
||
+ // printk("\tread divisor %x\n", readw(host->base + ARASAN_CLOCK_CTRL));
|
||
+
|
||
+ /* Busy wait for the clock to become stable */
|
||
+ do { } while (((readw(host->base + ARASAN_CLOCK_CTRL)) &
|
||
+ ARASAN_CLOCK_CTRL_ICLK_STABLE) == 0);
|
||
+
|
||
+ /* Enable the SD clock */
|
||
+ clock |= ARASAN_CLOCK_CTRL_SDCLK_ENABLE;
|
||
+ writew(clock, host->base + ARASAN_CLOCK_CTRL);
|
||
+
|
||
+ DBG("\tclk ctrl reg. [0x%x]\n",
|
||
+ (unsigned int)readw(host->base + ARASAN_CLOCK_CTRL));
|
||
+ }
|
||
+
|
||
+ spin_unlock_irqrestore(&host->lock, flags);
|
||
+}
|
||
+
|
||
+/* Read the response from the card */
|
||
+static void arasan_get_resp(struct mmc_command *cmd, struct arasan_host *host)
|
||
+{
|
||
+ unsigned int i;
|
||
+ unsigned int resp[4];
|
||
+
|
||
+ for (i = 0; i < 4; i++)
|
||
+ resp[i] = readl(host->base + ARASAN_RSP(i));
|
||
+
|
||
+ if (cmd->flags & MMC_RSP_136) {
|
||
+ cmd->resp[3] = (resp[0] << 8);
|
||
+ cmd->resp[2] = (resp[0] >> 24) | (resp[1] << 8);
|
||
+ cmd->resp[1] = (resp[1] >> 24) | (resp[2] << 8);
|
||
+ cmd->resp[0] = (resp[2] >> 24) | (resp[3] << 8);
|
||
+ } else {
|
||
+ cmd->resp[0] = resp[0];
|
||
+ cmd->resp[1] = resp[1];
|
||
+ }
|
||
+
|
||
+ /* 数据出错之后,response寄存器当中仍然会有接收到响应数据 */
|
||
+ //printk("resp[0]:%x\n",resp[0]);
|
||
+
|
||
+ DBG("%s: resp length %s\n-(CMD%u):\n %08x %08x %08x %08x\n"
|
||
+ "-RAW reg:\n %08x %08x %08x %08x\n",
|
||
+ __func__, (cmd->flags & MMC_RSP_136) ? "136" : "48", cmd->opcode,
|
||
+ cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
|
||
+ resp[0], resp[1], resp[2], resp[3]);
|
||
+}
|
||
+
|
||
+int blocks;
|
||
+static void arasan_read_block_pio(struct arasan_host *host)
|
||
+{
|
||
+ unsigned long flags;
|
||
+ u16 blksz;
|
||
+
|
||
+ DBG("\tPIO reading\n");
|
||
+
|
||
+ local_irq_save(flags);
|
||
+
|
||
+ for (blksz = host->pio_blksz; blksz > 0; blksz -= 4) {
|
||
+
|
||
+ *host->pio_blkbuf =
|
||
+ readl(host->base + ARASAN_BUFF);
|
||
+ host->pio_blkbuf++;
|
||
+ }
|
||
+
|
||
+ local_irq_restore(flags);
|
||
+}
|
||
+
|
||
+static void arasan_write_block_pio(struct arasan_host *host)
|
||
+{
|
||
+ unsigned long flags;
|
||
+ u16 blksz;
|
||
+
|
||
+ DBG("\tPIO writing\n");
|
||
+ //printk("\tPIO writing\n");
|
||
+ local_irq_save(flags);
|
||
+
|
||
+ //printk("host->pio_blksize:%d\n", host->pio_blksz);
|
||
+ for (blksz = host->pio_blksz; blksz > 0; blksz -= 4) {
|
||
+ //writel(0x00,host->base + ARASAN_BUFF);
|
||
+
|
||
+ writel(*host->pio_blkbuf,
|
||
+ host->base + ARASAN_BUFF);
|
||
+ host->pio_blkbuf++;
|
||
+ }
|
||
+
|
||
+ local_irq_restore(flags);
|
||
+}
|
||
+
|
||
+static void arasan_data_pio(struct arasan_host *host)
|
||
+{
|
||
+ static int i = 0;
|
||
+ static int j = 0;
|
||
+ int sg_length = 0;
|
||
+ static int size = 0;
|
||
+ static int size1 = 0;
|
||
+ if (host->pio_blocks == 0)
|
||
+ return;
|
||
+
|
||
+
|
||
+ if (host->status == STATE_DATA_READ) {
|
||
+ host->pio_blkbuf = sg_virt(host->sg[i])+size;
|
||
+ sg_length = sg_dma_len(host->sg[i]);
|
||
+ //printk("rd-%d, %d, %p, %d\n", host->sg_frags, i, host->pio_blkbuf, sg_length);
|
||
+
|
||
+ while (readl(host->base + ARASAN_PRESENT_STATE) &
|
||
+ ARASAN_PRESENT_STATE_BUFFER_RD_EN) {
|
||
+
|
||
+ arasan_read_block_pio(host);
|
||
+
|
||
+ size += host->pio_blksz;
|
||
+ //printk("size:%d\n", size);
|
||
+ if (size == sg_length) {
|
||
+ /* 下个sg */
|
||
+ i++;
|
||
+ size = 0;
|
||
+
|
||
+ if (i==host->sg_frags) {
|
||
+ //printk("hello world.\n");
|
||
+ host->pio_blocks--;
|
||
+ if (unlikely(host->pio_blocks != 0)) {
|
||
+ printk("SD-MMC fatal error.\n");
|
||
+ }
|
||
+ i = 0;
|
||
+ /* 数据全部操作完 */
|
||
+ if (host->sg)
|
||
+ kfree(host->sg);
|
||
+ break;
|
||
+ }
|
||
+ host->pio_blkbuf = sg_virt(host->sg[i]);
|
||
+ sg_length = sg_dma_len(host->sg[i]);
|
||
+ //printk("rd-%d, %d, %p, %d\n", host->sg_frags, i, host->pio_blkbuf, sg_length);
|
||
+ }
|
||
+
|
||
+ host->pio_blocks--;
|
||
+ if (host->pio_blocks == 0)
|
||
+ break;
|
||
+ }
|
||
+ //printk("rd-sg_frags:%d, cur:%d\n", host->sg_frags, i);
|
||
+
|
||
+ } else {
|
||
+ host->pio_blkbuf = sg_virt(host->sg[j])+size1;
|
||
+ sg_length = sg_dma_len(host->sg[j]);
|
||
+ //printk("wr-%d, %d, %p, %d\n", host->sg_frags, j, host->pio_blkbuf, sg_length);
|
||
+ while (readl(host->base + ARASAN_PRESENT_STATE) &
|
||
+ ARASAN_PRESENT_STATE_BUFFER_WR_EN) {
|
||
+
|
||
+ arasan_write_block_pio(host);
|
||
+
|
||
+ size1 += host->pio_blksz;
|
||
+ if (size1 == sg_length) {
|
||
+ /* 下个sg */
|
||
+ j++;
|
||
+ size1 = 0;
|
||
+
|
||
+ if (j==host->sg_frags) {
|
||
+ //printk("hello world.\n");
|
||
+ host->pio_blocks--;
|
||
+ if (unlikely(host->pio_blocks != 0)) {
|
||
+ printk("SD-MMC fatal error.\n");
|
||
+ }
|
||
+ j = 0;
|
||
+
|
||
+ /* 数据全部操作完 */
|
||
+ if (host->sg)
|
||
+ kfree(host->sg);
|
||
+ break;
|
||
+ }
|
||
+ host->pio_blkbuf = sg_virt(host->sg[j]);
|
||
+ sg_length = sg_dma_len(host->sg[j]);
|
||
+ //printk("dwr-%d, %d, %p, %d, %d\n", host->sg_frags, j, host->pio_blkbuf, sg_length, size1);
|
||
+ }
|
||
+
|
||
+ host->pio_blocks--;
|
||
+ if (host->pio_blocks == 0)
|
||
+ break;
|
||
+ }
|
||
+ //printk("wr-%d, %d, %p, %d, %d\n", host->sg_frags, j, host->pio_blkbuf, sg_length, size1);
|
||
+ //printk("wr-sg_frags:%d, cur:%d\n", host->sg_frags, j);
|
||
+ }
|
||
+
|
||
+ DBG("\tPIO transfer complete.\n");
|
||
+}
|
||
+
|
||
+static void arasan_start_cmd(struct arasan_host *host, struct mmc_command *cmd)
|
||
+{
|
||
+ u16 cmdreg = 0;
|
||
+
|
||
+ /* Command Request */
|
||
+ cmdreg = ARASAN_CMD_INDEX(cmd->opcode);
|
||
+ DBG("%s: cmd type %04x, CMD%d\n", __func__,
|
||
+ mmc_resp_type(cmd), cmd->opcode);
|
||
+
|
||
+ if (cmd->flags & MMC_RSP_BUSY) {
|
||
+ cmdreg |= ARASAN_CMD_RSP_48BUSY;
|
||
+ DBG("\tResponse length 48 check Busy.\n");
|
||
+ } else if (cmd->flags & MMC_RSP_136) {
|
||
+ cmdreg |= ARASAN_CMD_RSP_136;
|
||
+ DBG("\tResponse length 136\n");
|
||
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
|
||
+ cmdreg |= ARASAN_CMD_RSP_48;
|
||
+ DBG("\tResponse length 48\n");
|
||
+ } else {
|
||
+ cmdreg |= ARASAN_CMD_RSP_NONE;
|
||
+ DBG("\tNo Response\n");
|
||
+ }
|
||
+
|
||
+ if (cmd->flags & MMC_RSP_CRC) {
|
||
+ cmdreg |= ARASAN_CMD_CHECK_CMDCRC;
|
||
+ DBG("\tCheck the CRC field in the response\n");
|
||
+ }
|
||
+ if (cmd->flags & MMC_RSP_OPCODE) {
|
||
+ cmdreg |= ARASAN_CMD_INDX_CHECK;
|
||
+ DBG("\tCheck the Index field in the response\n");
|
||
+ }
|
||
+
|
||
+ /* Wait until the CMD line is not in use */
|
||
+ do { } while ((readl(host->base + ARASAN_PRESENT_STATE)) &
|
||
+ ARASAN_PRESENT_STATE_CMD_INHIBIT);
|
||
+
|
||
+ /* Set the argument register */
|
||
+ writel(cmd->arg, host->base + ARASAN_ARG);
|
||
+
|
||
+ /* Data present and must be transferred */
|
||
+ if (likely(host->mrq->data)) {
|
||
+ cmdreg |= ARASAN_CMD_DATA_PRESENT;
|
||
+ if (cmd->flags & MMC_RSP_BUSY)
|
||
+ /* Wait for data inhibit */
|
||
+ do { } while ((readl(host->base +
|
||
+ ARASAN_PRESENT_STATE)) &
|
||
+ ARASAN_PRESENT_STATE_DAT_INHIBIT);
|
||
+ }
|
||
+
|
||
+ /* Write the Command */
|
||
+ writew(cmdreg, host->base + ARASAN_CMD);
|
||
+
|
||
+ DBG("\tcmd: 0x%x cmd reg: 0x%x - cmd->arg 0x%x, reg 0x%x\n",
|
||
+ cmdreg, readw(host->base + ARASAN_CMD), cmd->arg,
|
||
+ readl(host->base + ARASAN_ARG));
|
||
+}
|
||
+
|
||
+#ifdef ARASAN_DEBUG
|
||
+static void arasan_adma_error(struct arasan_host *host)
|
||
+{
|
||
+ u8 status = readb(host->base + ARASAN_ADMA_ERR_STATUS);
|
||
+
|
||
+ if (status & ARASAN_ADMA_ERROR_LENGTH)
|
||
+ pr_err("-ADMA Length Mismatch Error...");
|
||
+
|
||
+ if (status & ARASAN_ADMA_ERROR_ST_TFR)
|
||
+ pr_err("-Transfer Data Error desc: ");
|
||
+ else if (status & ARASAN_ADMA_ERROR_ST_FDS)
|
||
+ pr_err("-Fetch Data Error desc: ");
|
||
+ else if (status & ARASAN_ADMA_ERROR_ST_STOP)
|
||
+ pr_err("-Stop DMA Data Error desc: ");
|
||
+
|
||
+ pr_err("0x%x", readl(host->base + ARASAN_ADMA_ADDRESS));
|
||
+}
|
||
+
|
||
+static void arasan_adma_dump_desc(u8 *desc)
|
||
+{
|
||
+ __le32 *dma;
|
||
+ __le16 *len;
|
||
+ u8 attr;
|
||
+
|
||
+ pr_info("\tDescriptors:");
|
||
+
|
||
+ while (1) {
|
||
+ dma = (__le32 *) (desc + 4);
|
||
+ len = (__le16 *) (desc + 2);
|
||
+ attr = *desc;
|
||
+
|
||
+ pr_info("\t\t%p: Buff 0x%08x, len %d, Attr 0x%02x\n",
|
||
+ desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
|
||
+
|
||
+ desc += 8;
|
||
+
|
||
+ if (attr & 2) /* END of descriptor */
|
||
+ break;
|
||
+ }
|
||
+}
|
||
+#else
|
||
+static void arasan_adma_error(struct arasan_host *host)
|
||
+{
|
||
+}
|
||
+
|
||
+static void arasan_adma_dump_desc(u8 *desc)
|
||
+{
|
||
+}
|
||
+#endif
|
||
+
|
||
+static int arasan_init_sg(struct arasan_host *host)
|
||
+{
|
||
+
|
||
+ host->adma_desc = kmalloc((ARASAN_DMA_DESC_NUM * 2 + 1) * 4, \
|
||
+ GFP_KERNEL);
|
||
+
|
||
+ if (unlikely(host->adma_desc == NULL))
|
||
+ return -ENOMEM;
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static void arasan_adma_table_pre(struct arasan_host *host,
|
||
+ struct mmc_data *data)
|
||
+{
|
||
+ int direction, i;
|
||
+ u8 *desc;
|
||
+ struct scatterlist *sg;
|
||
+ int len;
|
||
+ dma_addr_t addr;
|
||
+
|
||
+ if (host->status == STATE_DATA_READ)
|
||
+ direction = DMA_FROM_DEVICE;
|
||
+ else
|
||
+ direction = DMA_TO_DEVICE;
|
||
+
|
||
+ DBG("\t%s: sg entries %d\n", __func__, data->sg_len);
|
||
+
|
||
+ /*
|
||
+ * 得到scatterlist当中有多少个内在块需要传输
|
||
+ * 实际上sg_frags在正常的情况下的值 = data->sg_len
|
||
+ * -这个函数的实际目的是将虚拟地址映射成物理地址
|
||
+ * 这里和主调函数当中作的动作是一样的,所以这里可以屏蔽掉
|
||
+ */
|
||
+ /*
|
||
+ host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
|
||
+ data->sg_len, direction);
|
||
+ */
|
||
+ desc = host->adma_desc;
|
||
+
|
||
+ //printk("sg_frags:%d\n", host->sg_frags);
|
||
+ for_each_sg(data->sg, sg, host->sg_frags, i) {
|
||
+ addr = sg_dma_address(sg); /* 每一个scatter指定的DMA传输的地址 */
|
||
+ len = sg_dma_len(sg);
|
||
+ //printk("len:%d\n", len);
|
||
+ //printk("addr:%x\n", addr);
|
||
+
|
||
+ DBG("\t\tFrag %d: addr 0x%x, len %d\n", i, addr, len);
|
||
+
|
||
+ /* Preparing the descriptor */
|
||
+ desc[7] = (addr >> 24) & 0xff;
|
||
+ desc[6] = (addr >> 16) & 0xff;
|
||
+ desc[5] = (addr >> 8) & 0xff;
|
||
+ desc[4] = (addr >> 0) & 0xff;
|
||
+
|
||
+ desc[3] = (len >> 8) & 0xff;
|
||
+ desc[2] = (len >> 0) & 0xff;
|
||
+
|
||
+ desc[1] = 0x00;
|
||
+ desc[0] = 0x21;
|
||
+
|
||
+ desc += 8;
|
||
+ }
|
||
+ //printk("i=%d\n", i);
|
||
+ /* 返回到最后一个descriptor,并将该descriptor设置为最后一个descriptor,
|
||
+ * 让sdio知道什么时候停止取descriptor
|
||
+ */
|
||
+ desc -= 8;
|
||
+ desc[0] = 0x23;
|
||
+
|
||
+ arasan_adma_dump_desc(host->adma_desc);
|
||
+
|
||
+ /* 将kmalloc获得的descriptors的虚拟地址转换成物理地址 */
|
||
+ host->adma_addr = dma_map_single(mmc_dev(host->mmc),
|
||
+ host->adma_desc,
|
||
+ (ARASAN_DMA_DESC_NUM * 2 + 1) * 4,
|
||
+ DMA_TO_DEVICE);
|
||
+
|
||
+ writel(host->adma_addr, host->base + ARASAN_ADMA_ADDRESS);
|
||
+}
|
||
+
|
||
+static void arasan_adma_table_post(struct arasan_host *host,
|
||
+ struct mmc_data *data)
|
||
+{
|
||
+ int direction;
|
||
+
|
||
+ if (host->status == STATE_DATA_READ)
|
||
+ direction = DMA_FROM_DEVICE;
|
||
+ else
|
||
+ direction = DMA_TO_DEVICE;
|
||
+
|
||
+ DBG("\t%s\n", __func__);
|
||
+
|
||
+ dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
|
||
+ (ARASAN_DMA_DESC_NUM * 2 + 1) * 4, DMA_TO_DEVICE);
|
||
+
|
||
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, direction);
|
||
+}
|
||
+
|
||
+static int arasan_setup_data(struct arasan_host *host)
|
||
+{
|
||
+ u16 blksz=0;
|
||
+ u16 xfer = 0;
|
||
+ struct mmc_data *data = host->mrq->data;
|
||
+
|
||
+ DBG("%s:\n\t%s mode, data dir: %s; Buff=0x%08x,"
|
||
+ "blocks=%d, blksz=%d\n", __func__, host->use_pio ? "PIO" : "DMA",
|
||
+ (data->flags & MMC_DATA_READ) ? "read" : "write",
|
||
+ (unsigned int)sg_virt(data->sg), data->blocks, data->blksz);
|
||
+
|
||
+ /* Transfer Direction */
|
||
+ if (data->flags & MMC_DATA_READ) {
|
||
+ xfer |= ARASAN_XFER_DATA_DIR;
|
||
+ host->status = STATE_DATA_READ;
|
||
+ } else {
|
||
+ xfer &= ~ARASAN_XFER_DATA_DIR;
|
||
+ host->status = STATE_DATA_WRITE;
|
||
+ }
|
||
+
|
||
+ xfer |= ARASAN_XFER_BLK_COUNT_EN;
|
||
+
|
||
+ if (data->blocks > 1) {
|
||
+ if (host->auto_cmd12) // sd2.0
|
||
+ xfer |= ARASAN_XFER_MULTI_BLK | ARASAN_XFER_AUTOCMD12;
|
||
+ else // sdio2.0
|
||
+ xfer |= ARASAN_XFER_MULTI_BLK;
|
||
+ }
|
||
+
|
||
+ //printk("blksz:%d\n", data->blksz);
|
||
+ /* Set the block size register */
|
||
+ //blksz = ARASAN_BLOCK_SIZE_SDMA_512KB;
|
||
+ blksz |= (data->blksz & ARASAN_BLOCK_SIZE_TRANSFER);
|
||
+ blksz |= (data->blksz & 0x1000) ? ARASAN_BLOCK_SIZE_SDMA_8KB : 0;
|
||
+
|
||
+ //printk("blksz:%x\n", blksz);
|
||
+ writew(blksz, host->base + ARASAN_BLK_SIZE);
|
||
+
|
||
+ /* Set the block count register */
|
||
+ writew(data->blocks, host->base + ARASAN_BLK_COUNT);
|
||
+ //printk("blocks:%d\n", data->blocks);
|
||
+
|
||
+ /* PIO mode is used when 'pio' var is set by the user or no
|
||
+ * sdma is available from HC caps. */
|
||
+ if (unlikely(host->use_pio || (host->cap.sdma == 0))) {
|
||
+ int i = 0;
|
||
+ struct scatterlist *_sg;
|
||
+ //int len;
|
||
+
|
||
+ _sg = NULL;
|
||
+ host->sg = NULL;
|
||
+ host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
|
||
+ data->sg_len,
|
||
+ (host->status & STATE_DATA_READ) ?
|
||
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||
+
|
||
+ host->sg = (struct scatterlist **)kmalloc(sizeof(struct scatterlist*)*host->sg_frags,GFP_KERNEL);
|
||
+ if (unlikely(host->sg==NULL)) {
|
||
+ printk("SD-MMC kmalloc failed.\n");
|
||
+ return -ENOMEM;
|
||
+ }
|
||
+
|
||
+ //printk("sg_frags:%d\n", host->sg_frags);
|
||
+ for_each_sg(data->sg, _sg, host->sg_frags, i) {
|
||
+ host->sg[i] = _sg;
|
||
+ //printk("sg_len:%d\n",sg_dma_len(_sg));
|
||
+ }
|
||
+
|
||
+ /* PIO的数据传输在中断当中完成 */
|
||
+ //printk("blksz:%d\n", blksz);
|
||
+ //printk("blocks:%d\n", data->blocks);
|
||
+ host->pio_blksz = data->blksz;
|
||
+ host->pio_blocks = data->blocks;
|
||
+ //host->pio_blkbuf = sg_virt(data->sg);
|
||
+ } else {
|
||
+ dma_addr_t phys_addr;
|
||
+
|
||
+ /* Enable DMA */
|
||
+ xfer |= ARASAN_XFER_DMA_EN;
|
||
+
|
||
+ /* Scatter list init */
|
||
+ host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
|
||
+ data->sg_len,
|
||
+ (host->status & STATE_DATA_READ) ?
|
||
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||
+
|
||
+ phys_addr = sg_dma_address(data->sg);
|
||
+
|
||
+ if (likely(host->adma)) {
|
||
+ /* Set the host control register dma bits for adma
|
||
+ * if supported and enabled by user. */
|
||
+ host->ctrl |= ARASAN_HOST_CTRL_ADMA2_32;
|
||
+
|
||
+ /* Prepare ADMA table */
|
||
+ arasan_adma_table_pre(host, data);
|
||
+ } else {
|
||
+ /* SDMA Mode selected (default mode) */
|
||
+ host->ctrl &= ~ARASAN_HOST_CTRL_ADMA2_64;
|
||
+
|
||
+ writel((unsigned int)phys_addr,
|
||
+ host->base + ARASAN_SDMA_SYS_ADDR);
|
||
+ }
|
||
+ writeb(host->ctrl, host->base + ARASAN_HOST_CTRL);
|
||
+
|
||
+ }
|
||
+ /* Set the data transfer mode register */
|
||
+ writew(xfer, host->base + ARASAN_XFER_MODE);
|
||
+
|
||
+ DBG("\tHC Reg [xfer 0x%x] [blksz 0x%x] [blkcount 0x%x] [CRTL 0x%x]\n",
|
||
+ readw(host->base + ARASAN_XFER_MODE),
|
||
+ readw(host->base + ARASAN_BLK_SIZE),
|
||
+ readw(host->base + ARASAN_BLK_COUNT),
|
||
+ readb(host->base + ARASAN_HOST_CTRL));
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static void arasan_finish_data(struct arasan_host *host)
|
||
+{
|
||
+ struct mmc_data *data = host->mrq->data;
|
||
+
|
||
+ DBG("\t%s\n", __func__);
|
||
+
|
||
+ if (unlikely(host->pio_blkbuf)) {
|
||
+ host->pio_blksz = 0;
|
||
+ host->pio_blocks = 0;
|
||
+ host->pio_blkbuf = NULL;
|
||
+ } else {
|
||
+ if (likely(host->adma)) {
|
||
+ arasan_adma_table_post(host, data);
|
||
+ } else {
|
||
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
|
||
+ host->sg_frags,
|
||
+ (host->status & STATE_DATA_READ) ?
|
||
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||
+ }
|
||
+ }
|
||
+
|
||
+ data->bytes_xfered = data->blocks * data->blksz;
|
||
+ host->status = STATE_CMD;
|
||
+}
|
||
+
|
||
+static int arasan_finish_cmd(unsigned int err_status, unsigned int status,
|
||
+ unsigned int opcode)
|
||
+{
|
||
+ int ret = 0;
|
||
+
|
||
+ if (unlikely(err_status)) {
|
||
+ if (err_status & ARASAN_CMD_TIMEOUT) {
|
||
+ DBG("\tcmd_timeout...\n");
|
||
+ //printk("\tcmd_timeout...\n");
|
||
+ ret = -ETIMEDOUT;
|
||
+ }
|
||
+ if (err_status & ARASAN_CMD_CRC_ERROR) {
|
||
+ DBG("\tcmd_crc_error...\n");
|
||
+ ret = -EILSEQ;
|
||
+ }
|
||
+ if (err_status & ARASAN_CMD_END_BIT_ERROR) {
|
||
+ DBG("\tcmd_end_bit_error...\n");
|
||
+ ret = -EILSEQ;
|
||
+ }
|
||
+ if (err_status & ARASAN_CMD_INDEX_ERROR) {
|
||
+ DBG("\tcmd_index_error...\n");
|
||
+ ret = -EILSEQ;
|
||
+ }
|
||
+ }
|
||
+ if (likely(status & ARASAN_N_CMD_COMPLETE))
|
||
+ DBG("\tCommand (CMD%u) Completed irq...\n", opcode);
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+//#define ARASAN_IRQ_DEFAULT_MASK 0x02ff00fb
|
||
+/* Enable/Disable Normal and Error interrupts */
|
||
+static void aranan_enable_sdio_irq(struct mmc_host *mmc, int enable)
|
||
+{
|
||
+ unsigned long flags;
|
||
+ struct arasan_host *host = mmc_priv(mmc);
|
||
+ static unsigned int arasan_irq_mask;
|
||
+
|
||
+ DBG("%s: %s CARD_IRQ\n", __func__, enable ? "enable" : "disable");
|
||
+ //printk("%s: %s CARD_IRQ\n", __func__, enable ? "enable" : "disable");
|
||
+
|
||
+ arasan_irq_mask = host->card_irq ?ARASAN_IRQ_DEFAULT_MASK:
|
||
+ ARASAN_IRQ_DEFAULT_MASK_NOCARDIRQ;
|
||
+ spin_lock_irqsave(&host->lock, flags);
|
||
+ if (enable)
|
||
+ host->intr_en = arasan_irq_mask;
|
||
+ else
|
||
+ host->intr_en = 0;
|
||
+
|
||
+ writel(host->intr_en, host->base + ARASAN_NORMAL_INT_STATUS_EN);
|
||
+ writel(host->intr_en, host->base + ARASAN_NORMAL_INT_SIGN_EN);
|
||
+ spin_unlock_irqrestore(&host->lock, flags);
|
||
+}
|
||
+
|
||
+static void arasan_timeout_timer(unsigned long data)
|
||
+{
|
||
+ struct arasan_host *host = (struct arasan_host *)data;
|
||
+ struct mmc_request *mrq;
|
||
+ unsigned long flags;
|
||
+
|
||
+ spin_lock_irqsave(&host->lock, flags);
|
||
+
|
||
+ //printk("timeout.\n");
|
||
+ //printk("host->status:%d\n", host->status);
|
||
+ if ((host->mrq) && ((host->status==STATE_CMD) ||
|
||
+ (host->status==STATE_DATA_READ) ||
|
||
+ (host->status==STATE_DATA_WRITE))) {
|
||
+ mrq = host->mrq;
|
||
+
|
||
+ DBG("%s: Timeout waiting for hardware interrupt.\n",
|
||
+ mmc_hostname(host->mmc));
|
||
+
|
||
+ writel(0xffffffff, host->base + ARASAN_NORMAL_INT_STATUS);
|
||
+ aranan_enable_sdio_irq(host->mmc, 1);
|
||
+
|
||
+ if (mrq->data) {
|
||
+ arasan_finish_data(host);
|
||
+ arsan_sw_reset(host, reset_dat_line);
|
||
+ mrq->data->error = -ETIMEDOUT;
|
||
+ }
|
||
+ if (likely(mrq->cmd)) {
|
||
+ mrq->cmd->error = -ETIMEDOUT;
|
||
+ arsan_sw_reset(host, reset_cmd_line);
|
||
+ arasan_get_resp(mrq->cmd, host);
|
||
+ }
|
||
+ arasan_ctrl_led(host, 0);
|
||
+ host->mrq = NULL;
|
||
+ mmc_request_done(host->mmc, mrq);
|
||
+ }
|
||
+ spin_unlock_irqrestore(&host->lock, flags);
|
||
+}
+
+/* Process requests from the MMC layer */
+static void arasan_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	/* By the time we get here, the only useful fields left in mrq are cmd and data */
+	struct arasan_host *host = mmc_priv(mmc);
+	struct mmc_command *cmd = mrq->cmd;
+	unsigned long flags;
+
+	BUG_ON(host->mrq != NULL);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	DBG(">>> arasan_request:\n");
+	/* Check that there is a card in the slot */
+	if (unlikely(arasan_test_card(host) < 0)) {
+		DBG("%s: Error: No card present...\n", mmc_hostname(host->mmc));
+
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq); /* from core.c: tell the upper layer the request is finished */
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+	//printk(KERN_EMERG"The card is present.\n");
+
+	host->mrq = mrq;
+
+	host->status = STATE_CMD;
+	if (likely(mrq->data)) {
+		//printk("1\n");
+		//mdelay(10000);
+		arasan_setup_data(host);
+	}
+
+	/* Turn-on/off the LED when send/complete a cmd */
+	arasan_ctrl_led(host, 1);
+
+	//printk("start_cmd.\n");
+	//mdelay(5000);
+	arasan_start_cmd(host, cmd);
+
+	mod_timer(&host->timer, jiffies + 5 * HZ);
+
+	DBG("<<< arasan_request done!\n");
+	//printk("<<< arasan_request done!\n");
+	spin_unlock_irqrestore(&host->lock, flags);
+}
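+/*
+ * Request lifecycle (editor's summary of the code above, for orientation):
+ *   arasan_request() -> arasan_setup_data() / arasan_start_cmd()
+ *     -> a 5 s watchdog is armed via mod_timer()
+ *     -> arasan_irq() completes the command/data and calls mmc_request_done()
+ *     -> arasan_timeout_timer() fires instead if no interrupt ever arrives.
+ */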
+
+static int arasan_get_ro(struct mmc_host *mmc)
+{
+	struct arasan_host *host = mmc_priv(mmc);
+
+	u32 ro = readl(host->base + ARASAN_PRESENT_STATE);
+	if (!(ro & ARASAN_PRESENT_STATE_WR_EN))
+		return 1;
+
+	return 0;
+}
+
+/* I/O bus settings (MMC clock/power ...) */
+static void arasan_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct arasan_host *host = mmc_priv(mmc);
+	u8 ctrl_reg = readb(host->base + ARASAN_HOST_CTRL);
+
+	DBG("%s: pwr %d, clk %d, vdd %d, bus_width %d, timing %d\n",
+	    __func__, ios->power_mode, ios->clock, ios->vdd, ios->bus_width,
+	    ios->timing);
+	//printk(KERN_EMERG"clk:%d\n", ios->clock);
+
+	/* Set the power supply mode */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		arasan_power_set(host, 0, ios->vdd);
+	} else {
+		//printk("powerup.\n");
+		//mdelay(5000);
+		arasan_power_set(host, 1, ios->vdd);
+		//printk("after powerup.\n");
+		//mdelay(5000);
+	}
+
+	/* Timing (high speed supported?) */
+	if ((ios->timing == MMC_TIMING_MMC_HS ||
+	     ios->timing == MMC_TIMING_SD_HS) && host->cap.high_speed) {
+		/*
+		 * This evaluation board can only drive output on the falling
+		 * edge even in high-speed mode, so the bit stays cleared.
+		 */
+		//ctrl_reg |= ARASAN_HOST_CTRL_HIGH_SPEED;
+	}
+	/* Clear the current bus width configuration */
+	ctrl_reg &= ~ARASAN_HOST_CTRL_SD_MASK;
+
+	/* Set SD bus bit mode */
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_8:
+		ctrl_reg |= ARASAN_HOST_CTRL_SD8;
+		break;
+	case MMC_BUS_WIDTH_4:
+		ctrl_reg |= ARASAN_HOST_CTRL_SD;
+		break;
+	/* added to fix cards not being recognised after re-insertion */
+	case MMC_BUS_WIDTH_1:
+		ctrl_reg &= ~ARASAN_HOST_CTRL_SD;
+		break;
+	}
+
+	/* Default to maximum timeout */
+	writeb(0x0e, host->base + ARASAN_TIMEOUT_CTRL);
+
+	/* Disable Card Interrupt in Host in case we change
+	 * the Bus Width. */
+	aranan_enable_sdio_irq(host->mmc, 0);
+
+	host->ctrl = ctrl_reg;
+	writeb(host->ctrl, host->base + ARASAN_HOST_CTRL);
+
+	aranan_enable_sdio_irq(host->mmc, 1);
+
+	/* Set clock */
+	arasan_set_clock(host, ios->clock);
+	//printk(KERN_EMERG"clk set done.\n");
+	//mdelay(5000);
+}
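+/*
+ * Note (derived from the ARASAN_HOST_CTRL_* definitions in arasan.h):
+ * ARASAN_HOST_CTRL_SD_MASK is 0x22, i.e. ARASAN_HOST_CTRL_SD (bit 1,
+ * 4-bit mode) | ARASAN_HOST_CTRL_SD8 (bit 5, 8-bit mode), so the
+ * clear-then-set sequence above always leaves exactly one bus-width
+ * configuration active.
+ */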
+
+/* Tasklet for Card-detection */
+static void arasan_tasklet_card(unsigned long data)
+{
+	unsigned long flags;
+	struct arasan_host *host = (struct arasan_host *)data;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (likely((readl(host->base + ARASAN_PRESENT_STATE) &
+		    ARASAN_PRESENT_STATE_CARD_PRESENT))) {
+		if (host->mrq) {
+			// printk("card_detection.\n");
+			pr_err("%s: Card removed during transfer!\n",
+			       mmc_hostname(host->mmc));
+			/* Reset cmd and dat lines */
+			arsan_sw_reset(host, reset_cmd_line);
+			arsan_sw_reset(host, reset_dat_line);
+
+			if (likely(host->mrq->cmd)) {
+				host->mrq->cmd->error = -ENOMEDIUM;
+				mmc_request_done(host->mmc, host->mrq);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (likely(host->mmc))
+		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+}
+
+static void arasan_setup_hc(struct arasan_host *host);
+static irqreturn_t arasan_irq(int irq, void *dev)
+{
+	struct arasan_host *host = dev;
+	unsigned int status, err_status, handled = 0;
+	unsigned short int_en;
+	struct mmc_command *cmd = NULL;
+	struct mmc_data *data = NULL;
+
+	/* host->lock already serialises us, so this variable needs no extra locking */
+	//static int get_card_irq = 0;
+
+	spin_lock(&host->lock);
+
+	/* Interrupt Status */
+	status = readl(host->base + ARASAN_NORMAL_INT_STATUS);
+	err_status = (status >> 16) & 0xffff;
+
+	/* Clear the interrupt status register */
+	DBG("\tclear status and exit...\n");
+	writel(status, host->base + ARASAN_NORMAL_INT_STATUS);
+
+	if (likely(status & ARASAN_N_CARD_IRQ)) {
+		/*
+		 * Do not just bail out on a card interrupt: a data-complete
+		 * interrupt may arrive together with it.
+		 *
+		 * mmc_signal_sdio_irq() is no use here: it masks *all*
+		 * interrupts, and other interrupts can still fire in that
+		 * window (an artifact of how this controller is wired up),
+		 * so instead only the SDIO interrupt is masked below.
+		 */
+		int_en = readl(host->base + ARASAN_NORMAL_INT_STATUS_EN);
+		int_en &= ~(0x1 << 8);
+
+		writel(int_en, host->base + ARASAN_NORMAL_INT_STATUS_EN);
+		/*
+		 * Guarded replacement for a bare wake_up_process(): the
+		 * peripheral sits behind a power switch, and this code
+		 * enables the interrupt before the switch is turned on.
+		 * DAT1 can therefore be low (raising an interrupt) while
+		 * sdio_irq_thread has not been assigned yet.
+		 */
+		host->mmc->sdio_irq_pending = true;
+		if (host->mmc && host->mmc->sdio_irq_thread)
+			wake_up_process(host->mmc->sdio_irq_thread);
+	}
+
+#if 0
+	if ((status & ARASAN_N_CARD_REMOVAL) ||
+	    (status & ARASAN_N_CARD_INS)) {
+		/* Debounce the card-detect interrupt */
+		int_en = readw(host->base + ARASAN_NORMAL_INT_SIGN_EN);
+		//printk("int_en:%x\n", int_en);
+		int_en = (int_en & (~(0x3 << 6)));
+		//printk("int_en:%x\n", int_en);
+		writew(int_en, host->base + ARASAN_NORMAL_INT_SIGN_EN);
+	}
+#endif
+
+	DBG("%s: Normal IRQ status 0x%x, Error status 0x%x\n",
+	    __func__, status & 0xffff, err_status);
+
+	//printk("z\n");
+	/*
+	printk(KERN_EMERG"%s: Normal IRQ status 0x%x, Error status 0x%x\n",
+	       __func__, status & 0xffff, err_status);
+	*/
+	//printk("arasan_irq.\n");
+
+	if ((!host->need_poll) &&
+	    ((status & ARASAN_N_CARD_INS) ||
+	     (status & ARASAN_N_CARD_REMOVAL))) {
+		tasklet_schedule(&host->card_tasklet);
+	}
+
+	if (unlikely(!host->mrq)) {
+		goto out;
+	}
+
+	cmd = host->mrq->cmd;
+	data = host->mrq->data;
+
+	cmd->error = 0;
+	/* Check for any CMD interrupts */
+	if (likely(status & ARASAN_INT_CMD_MASK)) {
+		//printk("response to cmd.\n");
+		cmd->error = arasan_finish_cmd(err_status, status, cmd->opcode);
+		if (cmd->error)
+			arsan_sw_reset(host, reset_cmd_line);
+
+		if ((host->status == STATE_CMD) || cmd->error) {
+			arasan_get_resp(cmd, host);
+
+			handled = 1;
+		}
+	}
+
+	/* Check for any data interrupts */
+	if (likely((status & ARASAN_INT_DATA_MASK)) && data) {
+		data->error = 0;
+		if (unlikely(err_status)) {
+			//printk("error.\n");
+			if (err_status & ARASAN_DATA_TIMEOUT_ERROR) {
+				DBG("\tdata_timeout_error...\n");
+				data->error = -ETIMEDOUT;
+			}
+			if (err_status & ARASAN_DATA_CRC_ERROR) {
+				DBG("\tdata_crc_error...\n");
+				//printk("\tdata_crc_error...\n");
+				data->error = -EILSEQ;
+			}
+			if (err_status & ARASAN_DATA_END_ERROR) {
+				DBG("\tdata_end_error...\n");
+				data->error = -EILSEQ;
+			}
+			if (err_status & ARASAN_AUTO_CMD12_ERROR) {
+				unsigned int err_cmd12 =
+				    readw(host->base + ARASAN_CMD12_ERR_STATUS);
+
+				DBG("\tc12err 0x%04x\n", err_cmd12);
+
+				if (err_cmd12 & ARASAN_AUTOCMD12_ERR_NOTEXE)
+					data->stop->error = -ENOEXEC;
+
+				if ((err_cmd12 & ARASAN_AUTOCMD12_ERR_TIMEOUT)
+				    && !(err_cmd12 & ARASAN_AUTOCMD12_ERR_CRC))
+					/* Timeout Error */
+					data->stop->error = -ETIMEDOUT;
+				else if (!(err_cmd12 & ARASAN_AUTOCMD12_ERR_TIMEOUT)
+					 && (err_cmd12 & ARASAN_AUTOCMD12_ERR_CRC))
+					/* CRC Error */
+					data->stop->error = -EILSEQ;
+				else if ((err_cmd12 & ARASAN_AUTOCMD12_ERR_TIMEOUT)
+					 && (err_cmd12 & ARASAN_AUTOCMD12_ERR_CRC))
+					DBG("\tCMD line Conflict\n");
+			}
+			arsan_sw_reset(host, reset_dat_line);
+			handled = 1;
+		} else {
+			if (likely(((status & ARASAN_N_BUFF_READ) ||
+				    status & ARASAN_N_BUFF_WRITE))) {
+				DBG("\tData R/W interrupts...\n");
+				//printk("\tData R/W interrupts...\n");
+				/*
+				 * PIO transfer: the interrupt handler itself
+				 * must move all the data through the buffer.
+				 */
+				arasan_data_pio(host);
+			}
+
+			if (likely(status & ARASAN_N_DMA_IRQ))
+				DBG("\tDMA interrupts...\n");
+
+			if (likely(status & ARASAN_N_TRANS_COMPLETE)) {
+				//printk("done.\n");
+				DBG("\tData XFER completed interrupts...\n");
+				arasan_finish_data(host);
+				if (data->stop) {
+					u32 opcode = data->stop->opcode;
+					data->stop->error =
+					    arasan_finish_cmd(err_status,
+							      status, opcode);
+					arasan_get_resp(data->stop, host);
+				}
+				handled = 1;
+			}
+		}
+	}
+	if (err_status & ARASAN_ADMA_ERROR) {
+		//printk("3.\n");
+		DBG("\tADMA Error...\n");
+		arasan_adma_error(host);
+		cmd->error = -EIO;
+	}
+	if (err_status & ARASAN_CURRENT_LIMIT_ERROR) {
+		//printk("4.\n");
+		DBG("\tPower Fail...\n");
+		cmd->error = -EIO;
+	}
+
+	if (likely(host->mrq && handled)) {
+		//printk("5.\n");
+		struct mmc_request *mrq = host->mrq;
+
+		arasan_ctrl_led(host, 0);
+
+		del_timer(&host->timer);
+
+		host->mrq = NULL;
+		//DBG("\tcalling mmc_request_done...\n");
+		mmc_request_done(host->mmc, mrq);
+	}
+out:
+
+#if 0
+	if ((status & ARASAN_N_CARD_REMOVAL) ||
+	    (status & ARASAN_N_CARD_INS)) {
+		int_en = (int_en | (0x3 << 6));
+		//printk("int_en:%x\n", int_en);
+		writel(int_en, host->base + ARASAN_NORMAL_INT_STATUS_EN);
+		writew(int_en, host->base + ARASAN_NORMAL_INT_SIGN_EN);
+	}
+#endif
+
+	spin_unlock(&host->lock);
+
+	return IRQ_HANDLED;
+}
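+/*
+ * Note (from the register map in arasan.h): ARASAN_NORMAL_INT_STATUS (0x030)
+ * and ARASAN_ERR_INT_STATUS (0x032) are adjacent 16-bit registers, so the
+ * single 32-bit read at the top of arasan_irq() fetches both at once and
+ * "err_status = (status >> 16) & 0xffff" recovers the error half.
+ */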
+
+static void arasan_setup_hc(struct arasan_host *host)
+{
+	/* Clear all the interrupts before resetting */
+	arasan_clear_interrupts(host);
+
+	/* Reset All and get the HC version */
+	arsan_sw_reset(host, reset_all);
+
+	/* Print HC version and SPEC */
+	arsan_hc_version(host);
+
+	/* Set capabilities and print their info */
+	arasan_capabilities(host);
+
+	//printk("before arasan_set_interrupt.\n");
+	//mdelay(10000);
+
+	/* Enable interrupts */
+	//arasan_set_interrupts(host);
+
+	//printk("arasan_set_interrupt.\n");
+	//mdelay(10000);
+}
+
+static const struct mmc_host_ops arasan_ops = {
+	.request = arasan_request,
+	.get_ro = arasan_get_ro, /* write-protect state */
+	.set_ios = arasan_set_ios,
+	.enable_sdio_irq = aranan_enable_sdio_irq,
+};
+
+static int arasan_probe(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = NULL;
+	struct arasan_host *host = NULL;
+	const struct arasan_platform_data *arasan_data;
+	struct resource *r, *r1;
+	int ret, irq;
+
+	//printk(KERN_EMERG"platform_get_resource.\n");
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	r1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	arasan_data = pdev->dev.platform_data;
+
+	if (!r || !r1 || !arasan_data)
+		return -ENXIO;
+
+	//printk(KERN_EMERG"platform_get_irq_byname.\n");
+	//irq = platform_get_irq_byname(pdev, "sdio1_irq");
+	irq = r1->start;
+	//printk(KERN_EMERG"irq:%d\n", irq);
+	//printk(KERN_EMERG"start:%x\n", r->start);
+
+	/* WiFi PDN pin */
+	if (arasan_data->p_powerup != NULL) {
+		arasan_data->p_powerup();
+	} else {
+		//printk("needn't powerup.\n");
+	}
+
+	//printk(KERN_EMERG"request_mem_region.\n");
+	r = request_mem_region(r->start, resource_size(r), pdev->name);
+	if (!r) {
+		pr_err("%s: ERROR: memory allocation failed\n", __func__);
+		return -EBUSY;
+	}
+	//printk(KERN_EMERG"mmc_alloc_host.\n");
+	/* Allocate the mmc_host with private data size */
+	mmc = mmc_alloc_host(sizeof(struct arasan_host), &pdev->dev);
+	if (!mmc) {
+		pr_err("%s: ERROR: mmc_alloc_host failed\n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Verify resource from the platform */
+	//printk(KERN_EMERG"arasan_claim_host.\n");
+	ret = arasan_claim_resource(pdev);
+	if (ret < 0)
+		goto out;
+
+	host = mmc_priv(mmc);
+	host->mmc = mmc;
+	host->dev = &pdev->dev;
+	host->res = r;
+
+	/* card_irq: SD cards do not take the card interrupt */
+	host->card_irq = arasan_data->card_irq;
+
+	/*
+	 * auto_cmd12: distinguishes SD 2.0 multi-block transfers from
+	 * SDIO 2.0 ones (SDIO 2.0 uses CMD52 instead).
+	 */
+	host->auto_cmd12 = arasan_data->auto_cmd12;
+
+	/* use_pio: have the host read the buffer itself instead of using DMA */
+	host->use_pio = arasan_data->use_pio;
+	//printk("host->use_pio=%d\n", host->use_pio);
+
+	/*
+	 * need_detect: WiFi and SD cards share this driver; SD needs card
+	 * detection while WiFi does not. This decides whether every request
+	 * first checks that a card is present.
+	 */
+	host->need_detect = arasan_data->need_detect;
+	//printk("host->need_detect=%d\n", host->need_detect);
+
+	host->sdio_4bit_data = arasan_data->sdio_4bit_data;
+
+	/*
+	 * need_poll: let the MMC core do the card detection. On the surface
+	 * both paths come to the same thing, since arasan_tasklet_card also
+	 * kicks the host->detect work queue.
+	 */
+	host->need_poll = arasan_data->need_poll;
+	if (host->need_poll) {
+		mmc->caps |= MMC_CAP_NEEDS_POLL;
+		DBG("\tHC needs polling to detect the card...");
+	} else
+		/* do not set MMC_CAP_NEEDS_POLL in caps */
+		tasklet_init(&host->card_tasklet, arasan_tasklet_card,
+			     (unsigned long)host);
+
+	//printk(KERN_EMERG"ioremap.\n");
+	//printk(KERN_EMERG"r->start:%x\n", r->start);
+	host->base = ioremap(r->start, resource_size(r));
+	if (!host->base) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+	//printk(KERN_EMERG"host->base:%x\n", host->base);
+
+	//printk(KERN_EMERG"request_irq.\n");
+	ret = request_irq(irq, arasan_irq, IRQF_SHARED, ARASAN_DRIVER_NAME, host);
+	if (ret) {
+		pr_err("%s: cannot assign irq %d\n", __func__, irq);
+		goto out;
+	} else
+		host->irq = irq;
+
+	spin_lock_init(&host->lock);
+
+	/* Setup the Host Controller according to its capabilities */
+	//printk(KERN_EMERG"arasan_setup_hc.\n");
+	arasan_setup_hc(host);
+
+	mmc->ops = &arasan_ops;
+
+	/* These voltages are used to negotiate host/card voltage during probing */
+	if (host->cap.voltage33)
+		mmc->ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
+	if (host->cap.voltage30)
+		mmc->ocr_avail |= MMC_VDD_29_30;
+	if (host->cap.voltage18)
+		mmc->ocr_avail |= MMC_VDD_165_195;
+
+	mmc->caps = MMC_CAP_SDIO_IRQ; /* used for SDIO */
+
+	/* Do not use SPI mode during probing */
+#if 0
+	if (host->cap.spi)
+		mmc->caps |= MMC_CAP_SPI;
+#endif
+	/* The MMC core uses this to decide whether to switch the host to a 4-/8-bit bus */
+	if (host->sdio_4bit_data) {
+		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+	}
+
+	/* The MMC core uses this to decide whether to enable high-speed mode */
+	if (!host->card_irq) {
+		if (host->cap.high_speed)
+			mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+	}
+
+	/* Not actually used by the MMC core */
+	host->freq = host->cap.timer_freq * 1000000;
+	//host->use_pio = pio;
+
+	/*
+	 * Of little use to the core either; it just fills in a field so the
+	 * upper layers have a way to query the value.
+	 */
+	mmc->f_max = maxfreq;
+	mmc->f_min = mmc->f_max / 256;
+
+	/*
+	 * Maximum block size. This is specified in the capabilities register.
+	 */
+	mmc->max_blk_size = host->cap.max_blk_len;
+	/* Maximum number of blocks in one transfer */
+	mmc->max_blk_count = 65535;
+
+	/*
+	 * A value of 1 would mean the interface can only move a single
+	 * segment per transfer; larger values allow scatter/gather. Since
+	 * max_segs * max_seg_size need only cover max_req_size
+	 * (8 * 65536 = 524288), why set it higher than 8? Because not every
+	 * segment reaches 65536 bytes: experiments show that bio segments do
+	 * not always merge up to 65536, as they may not be physically
+	 * contiguous. In the worst case no page frames merge at all, which
+	 * gives 524288 / 4096 = 128.
+	 *
+	 * The value also interacts with the maximum request length. There is
+	 * no firm theory for this, but experimentally, if it is set too
+	 * small, requests never reach the max_req_size configured below.
+	 *
+	 * This is worth a real performance gain: roughly 10-15% on reads and
+	 * up to about 10% on writes.
+	 */
+	mmc->max_segs = 128;
+	//mmc->max_segs = 1;
+
+	/* One segment is one unit DMA transfer, i.e. how much data a single
+	 * DMA operation can move; commonly 65535. */
+	mmc->max_seg_size = 65536;
+
+	/*
+	 * Related parameters: max_hw_sectors and max_sectors.
+	 * max_hw_sectors comes from max_req_size / 512, and max_sectors is
+	 * the smaller of max_hw_sectors and BLK_DEF_MAX_SECTORS (1024).
+	 *
+	 * The upper layers care about max_sectors, the most the generic
+	 * block layer transfers in one go; it caps out at 1024, so setting
+	 * this above 1024 * 512 is pointless. For throughput, use the
+	 * maximum: 1024 * 512.
+	 */
+	//mmc->max_req_size = mmc->max_blk_count*mmc->max_blk_size;
+	mmc->max_req_size = 524288;
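+/*
+ * Worked example (editorial, restating the comments above): with 4 KiB
+ * pages and no merging at all, a maximal 524288-byte request needs
+ * 524288 / 4096 = 128 segments, hence max_segs = 128; with perfect merging
+ * the same request needs only 524288 / 65536 = 8 segments of max_seg_size.
+ */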
+
+	/* Passing the "pio" option, we force the driver to not
+	 * use any DMA engines. */
+	if (unlikely(host->use_pio)) {
+		adma = 0;
+		DBG("\tPIO mode\n");
+		printk("\tPIO mode\n");
+	} else {
+		if (likely(adma)) {
+			/* Turn-on the ADMA if supported by the HW
+			 * or Fall back to SDMA in case of failures */
+			DBG("\tADMA mode\n");
+			//printk("\tADMA mode\n");
+			//printk(KERN_EMERG"arasan_init_sg.\n");
+			ret = arasan_init_sg(host);
+			if (unlikely(ret)) {
+				pr_warning("\tSG init failed (disable ADMA)\n");
+				adma = 0;
+			} else {
+				/* Set the Maximum number of segments
+				 * because we can do scatter/gathering in
+				 * ADMA mode. */
+				//mmc->max_hw_segs = 128;
+			}
+		} else
+			DBG("\tSDMA mode\n");
+	}
+	host->adma = adma;
+
+	//printk("before add host.\n");
+	//mdelay(10000);
+
+	platform_set_drvdata(pdev, mmc);
+	ret = mmc_add_host(mmc);
+	if (ret)
+		goto out;
+
+	/* Set up the timer; the timeout itself is set in mod_timer() */
+	//printk(KERN_EMERG"setup_timer.\n");
+	setup_timer(&host->timer, arasan_timeout_timer, (unsigned long)host);
+
+	pr_info("%s: driver initialized... IRQ: %d, Base addr 0x%x\n",
+		mmc_hostname(mmc), irq, (unsigned int)host->base);
+
+//#ifdef ARASAN_DEBUG
+	led = 1;
+//#endif
+	return 0;
+out:
+	if (host) {
+		if (host->irq)
+			free_irq(host->irq, host);
+		if (host->base)
+			iounmap(host->base);
+	}
+	if (r)
+		release_resource(r);
+	if (mmc)
+		mmc_free_host(mmc);
+
+	return ret;
+}
+
+static int arasan_remove(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+	if (mmc) {
+		struct arasan_host *host = mmc_priv(mmc);
+
+		arasan_clear_interrupts(host);
+		free_irq(host->irq, host);
+		if (!host->need_poll) {
+			tasklet_kill(&host->card_tasklet);
+		}
+		mmc_remove_host(mmc);
+		arasan_power_set(host, 0, -1);
+		iounmap(host->base);
+		if (likely(host->adma))
+			kfree(host->adma_desc);
+		release_resource(host->res);
+		mmc_free_host(mmc);
+	}
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#if 0
+static void arasan_release(struct device *device)
+{
+	return;
+}
+
+static struct resource arasan1_resource[] = {
+	[0] = {
+		.name = "sdio1_addr",
+		.start = SDIO1_BASE,
+		.end = SDIO1_BASE + 0x10000 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.name = "sdio1_irq",
+		.start = SDIO1_IRQ,
+		.end = SDIO1_IRQ,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+struct arasan_platform_data arasan1_platform_data = {
+	.need_poll = 0,
+	.need_detect = 1,
+	.use_pio = 0,
+	.auto_cmd12 = 1,
+	.card_irq = 0,
+
+	.p_powerup = NULL,
+};
+/* sdio 1 */
+static struct platform_device arasan1_device = {
+	.id = 1,
+	.name = ARASAN_DRIVER_NAME,
+	.num_resources = ARRAY_SIZE(arasan1_resource),
+	.resource = arasan1_resource,
+	.dev = {
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &arasan1_platform_data,
+		.release = arasan_release,
+	}
+};
+#endif
+static struct platform_driver arasan_driver = {
+	.probe = arasan_probe,
+	.remove = arasan_remove,
+	.driver = {
+		.name = ARASAN_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init arasan_init(void)
+{
+	//printk(KERN_EMERG"arasan_init.\n");
+	//platform_device_register(&arasan1_device);
+	//printk(KERN_EMERG"platform_driver_register.\n");
+	return platform_driver_register(&arasan_driver);
+}
+
+static void __exit arasan_exit(void)
+{
+	//platform_device_unregister(&arasan1_device);
+	platform_driver_unregister(&arasan_driver);
+}
+
+#if 0
+static int __init arasan_cmdline_opt(char *str)
+{
+	char *opt;
+
+	if (!str || !*str)
+		return -EINVAL;
+
+	while ((opt = strsep(&str, ",")) != NULL) {
+		if (!strncmp(opt, "maxfreq:", 8))
+			strict_strtoul(opt + 8, 0, (unsigned long *)&maxfreq);
+		else if (!strncmp(opt, "adma:", 5))
+			strict_strtoul(opt + 5, 0, (unsigned long *)&adma);
+		else if (!strncmp(opt, "led:", 4))
+			strict_strtoul(opt + 4, 0, (unsigned long *)&led);
+		else if (!strncmp(opt, "pio:", 4))
+			strict_strtoul(opt + 4, 0, (unsigned long *)&pio);
+	}
+	return 0;
+}
+
+__setup("arasanmmc=", arasan_cmdline_opt);
+#endif
+
+module_init(arasan_init);
+module_exit(arasan_exit);
+
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@xxxxxx>");
+MODULE_DESCRIPTION("Arasan MMC/SD/SDIO Host Controller driver");
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/mmc/host/arasan.h linux-3.0.101.xm510/drivers/mmc/host/arasan.h
--- linux-3.0.101/drivers/mmc/host/arasan.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mmc/host/arasan.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,239 @@
+/*
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@xxxxxx>
+ *
+ * Copyright (C) 2010 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARASAN_H
+#define __ARASAN_H
+
+//#define ARASAN_CLOCKRATE_MAX		25000000
+#define ARASAN_CLOCKRATE_MAX		50000000
+#define ARASAN_DRIVER_NAME		"arasan"
+#define ARASAN_DMA_DESC_NUM		128
+
+/*
+ * Register offsets
+ */
+#define ARASAN_SDMA_SYS_ADDR		0x000
+#define ARASAN_BLK_SIZE			0x004
+#define ARASAN_BLK_COUNT		0x006
+#define ARASAN_ARG			0x008
+#define ARASAN_XFER_MODE		0x00c
+#define ARASAN_CMD			0x00e
+#define ARASAN_RSP(i)			(0x010 + ((i)<<2))
+#define ARASAN_RSP0			0x010
+#define ARASAN_RSP1			0x012
+#define ARASAN_RSP2			0x014
+#define ARASAN_RSP3			0x016
+#define ARASAN_RSP4			0x018
+#define ARASAN_RSP5			0x01a
+#define ARASAN_RSP6			0x01c
+#define ARASAN_RSP7			0x01e
+#define ARASAN_BUFF			0x020
+#define ARASAN_PRESENT_STATE		0x024
+#define ARASAN_HOST_CTRL		0x028
+#define ARASAN_PWR_CTRL			0x029
+#define ARASAN_GAP_CTRL			0x02a
+#define ARASAN_GAP_WAKEUP		0x02b
+#define ARASAN_CLOCK_CTRL		0x02c
+#define ARASAN_TIMEOUT_CTRL		0x02e
+#define ARASAN_SW_RESET			0x02f
+
+#define ARASAN_NORMAL_INT_STATUS	0x030
+#define ARASAN_ERR_INT_STATUS		0x032
+#define ARASAN_NORMAL_INT_STATUS_EN	0x034
+#define ARASAN_ERR_INT_STATUS_EN	0x036
+#define ARASAN_NORMAL_INT_SIGN_EN	0x038
+#define ARASAN_ERR_INT_SIGN_EN		0x03a
+
+#define ARASAN_CMD12_ERR_STATUS		0x03c
+
+#define ARASAN_CAPABILITIES		0x040
+
+#define ARASAN_ADMA_ERR_STATUS		0x054
+#define ARASAN_ADMA_ADDRESS		0x058
+
+#define ARASAN_SPI_INT_SUPPORT		0x0f0
+#define ARASAN_HOST_VERSION		0x0fe
+
+/* Error Interrupt Status Register */
+#define ARASAN_CMD_TIMEOUT		(1 << 0)
+#define ARASAN_CMD_CRC_ERROR		(1 << 1)
+#define ARASAN_CMD_END_BIT_ERROR	(1 << 2)
+#define ARASAN_CMD_INDEX_ERROR		(1 << 3)
+#define ARASAN_DATA_TIMEOUT_ERROR	(1 << 4)
+#define ARASAN_DATA_CRC_ERROR		(1 << 5)
+#define ARASAN_DATA_END_ERROR		(1 << 6)
+#define ARASAN_CURRENT_LIMIT_ERROR	(1 << 7)
+#define ARASAN_AUTO_CMD12_ERROR		(1 << 8)
+#define ARASAN_ADMA_ERROR		(1 << 9)
+#define ARASAN_TARGET_RESP_ERROR	(1 << 12)
+#define ARASAN_CEATA_ERROR		(1 << 13)
+
+/* Error Interrupt Status ENABLE reg. (0- Masked, 1: Enabled) */
+#define ARASAN_E_EN_CMD_TIMEOUT		(1 << 0)
+#define ARASAN_E_EN_CMD_CRC_ERROR	(1 << 1)
+#define ARASAN_E_EN_CMD_END_BIT_ERROR	(1 << 2)
+#define ARASAN_E_EN_CMD_INDEX_ERROR	(1 << 3)
+#define ARASAN_E_EN_DATA_TIMEOUT_ERROR	(1 << 4)
+#define ARASAN_E_EN_DATA_CRC_ERROR	(1 << 5)
+#define ARASAN_E_EN_DATA_END_ERROR	(1 << 6)
+#define ARASAN_E_EN_CURRENT_LIMIT_ERROR	(1 << 7)
+#define ARASAN_E_EN_AUTO_CMD12_ERROR	(1 << 8)
+#define ARASAN_E_EN_ADMA_ERROR		(1 << 9)
+#define ARASAN_E_EN_TARGET_RESP_ERROR	(1 << 12)
+#define ARASAN_E_EN_CEATA_ERROR		(1 << 13)
+
+/* Normal Interrupt Status Register */
+#define ARASAN_N_CMD_COMPLETE		(1 << 0)
+#define ARASAN_N_TRANS_COMPLETE		(1 << 1)
+#define ARASAN_N_BLK_GAP_EVENT		(1 << 2)
+#define ARASAN_N_DMA_IRQ		(1 << 3)
+#define ARASAN_N_BUFF_WRITE		(1 << 4)
+#define ARASAN_N_BUFF_READ		(1 << 5)
+#define ARASAN_N_CARD_INS		(1 << 6)
+#define ARASAN_N_CARD_REMOVAL		(1 << 7)
+#define ARASAN_N_CARD_IRQ		(1 << 8)
+#define ARASAN_N_ERROR_IRQ		(1 << 15)
+
+/* Normal Interrupt Status ENABLE reg. (0- Masked, 1: Enabled) */
+#define ARASAN_N_EN_CMD_COMPLETE	(1 << 0)
+#define ARASAN_N_EN_TRANS_COMPL		(1 << 1)
+#define ARASAN_N_EN_BLOCK_GAP		(1 << 2)
+#define ARASAN_N_EN_DMA_IRQ		(1 << 3)
+#define ARASAN_N_EN_BUFF_WRITE		(1 << 4)
+#define ARASAN_N_EN_BUFF_READ		(1 << 5)
+#define ARASAN_N_EN_CARD_INS		(1 << 6)
+#define ARASAN_N_EN_CARD_REM		(1 << 7)
+#define ARASAN_N_EN_CARD_IRQ		(1 << 8)
+
+/* Default Enable Normal/Error interrupt mask */
+#define ARASAN_IRQ_DEFAULT_MASK			0x02ff01fb
+#define ARASAN_IRQ_DEFAULT_MASK_NOCARDIRQ	0x02ff00fb
+
+/* Mask normal and error fields */
+#define ARASAN_INT_DATA_MASK		0x0070003a
+#define ARASAN_INT_CMD_MASK		0x000f0001
+
+/* Command Register */
+#define ARASAN_CMD_RSP_NONE		(0 << 0)
+#define ARASAN_CMD_RSP_136		(1 << 0)
+#define ARASAN_CMD_RSP_48		(2 << 0)
+#define ARASAN_CMD_RSP_48BUSY		(3 << 0)
+#define ARASAN_CMD_CHECK_CMDCRC		(1 << 3)
+#define ARASAN_CMD_INDX_CHECK		(1 << 4)
+#define ARASAN_CMD_DATA_PRESENT		(1 << 5)
+#define ARASAN_COMMAD_TYPE_NORM		(0 << 6)
+#define ARASAN_COMMAD_TYPE_SUSP		(1 << 6)
+#define ARASAN_COMMAD_TYPE_RESU		(2 << 6)
+#define ARASAN_COMMAD_TYPE_ABOR		(3 << 6)
+#define ARASAN_CMD_INDEX(x)		((x) << 8)
+
+/* Transfer Mode Register */
+#define ARASAN_XFER_DMA_EN		(1 << 0)
+#define ARASAN_XFER_BLK_COUNT_EN	(1 << 1)
+#define ARASAN_XFER_AUTOCMD12		(1 << 2) /* 1: Enable */
+#define ARASAN_XFER_DATA_DIR		(1 << 4) /* 0: Write, 1: Read */
+#define ARASAN_XFER_MULTI_BLK		(1 << 5) /* 0: Single 1: Multi */
+#define ARASAN_XFER_SPI_MODE		(1 << 7) /* 1: SPI 0: SD Mode */
+
+enum xfer_dat_cmd_status {
+	STATE_CMD = 0,
+	STATE_DATA_WRITE = 1,
+	STATE_DATA_READ = 2,
+	STATE_DATA_STOP = 3,
+};
+
+/* Software Reset */
+#define ARSAN_RESET_ALL			0x1
+#define ARSAN_RESET_CMD_LINE		0x2
+#define ARSAN_RESET_DAT_LINE		0x4
+
+enum sw_reset_cmd {
+	reset_all = 0,
+	reset_cmd_line = 1,
+	reset_dat_line = 2,
+};
+
+/* Host Control Register */
+#define ARASAN_HOST_CTRL_LED		(1 << 0)
+#define ARASAN_HOST_CTRL_SD		(1 << 1) /* 1: 4 bit mode */
+#define ARASAN_HOST_CTRL_HIGH_SPEED	(1 << 2)
+#define ARASAN_HOST_CTRL_SDMA_SEL	(0 << 3)
+#define ARASAN_HOST_CTRL_ADMA1		(1 << 3)
+#define ARASAN_HOST_CTRL_ADMA2_32	(2 << 3)
+#define ARASAN_HOST_CTRL_ADMA2_64	(3 << 3)
+#define ARASAN_HOST_CTRL_SD8		(1 << 5)
+#define ARASAN_HOST_CTRL_CARD_LEV_TEST	(1 << 6)
+#define ARASAN_HOST_CTRL_CARD_SIG_TEST	(1 << 7)
+
+#define ARASAN_HOST_CTRL_SD_MASK	0x22
+
+/* Clock Control Register */
+#define ARASAN_CLOCK_CTRL_SDCLK_MASK	0xff00
+#define ARASAN_CLOCK_CTRL_SDCLK_SHIFT	7
+#define ARASAN_CLOCK_CTRL_SDCLK_256	0x8000
+#define ARASAN_CLOCK_CTRL_SDCLK_128	0x4000
+#define ARASAN_CLOCK_CTRL_SDCLK_64	0x2000
+#define ARASAN_CLOCK_CTRL_SDCLK_32	0x1000
+#define ARASAN_CLOCK_CTRL_SDCLK_16	0x0800
+#define ARASAN_CLOCK_CTRL_SDCLK_8	0x0400
+#define ARASAN_CLOCK_CTRL_SDCLK_4	0x0200
+#define ARASAN_CLOCK_CTRL_SDCLK_2	0x0100
+#define ARASAN_CLOCK_CTRL_SDCLK_1	0x0000
+#define ARASAN_CLOCK_CTRL_SDCLK_ENABLE	(1 << 2)
+#define ARASAN_CLOCK_CTRL_ICLK_STABLE	(1 << 1)
+#define ARASAN_CLOCK_CTRL_ICLK_ENABLE	(1 << 0)
+
+/* Power Control Register */
+#define ARASAN_PWR_CTRL_UP		(1 << 0) /* 1: Power-On */
+#define ARASAN_PWR_BUS_VOLTAGE_33	(7 << 1)
+#define ARASAN_PWR_BUS_VOLTAGE_30	(6 << 1)
+#define ARASAN_PWR_BUS_VOLTAGE_18	(5 << 1)
+
+/* CMD12 error status bits */
+#define ARASAN_AUTOCMD12_ERR_NOTEXE	(1 << 0)
+#define ARASAN_AUTOCMD12_ERR_TIMEOUT	(1 << 1)
+#define ARASAN_AUTOCMD12_ERR_CRC	(1 << 2)
+#define ARASAN_AUTOCMD12_ERR_ENDBIT	(1 << 3)
+#define ARASAN_AUTOCMD12_ERR_INDEX	(1 << 4)
+#define ARASAN_AUTOCMD12_ERR_NOT_ISSUED	(1 << 7)
+
+/* Present State Register */
+#define ARASAN_PRESENT_STATE_DAT7_4		0x1e000000
+#define ARASAN_PRESENT_STATE_CMD_LINE		0x01000000
+#define ARASAN_PRESENT_STATE_DAT3_0		0x00f00000
+#define ARASAN_PRESENT_STATE_WR_EN		0x00080000
+#define ARASAN_PRESENT_STATE_CARD_DETECT	0x00040000
+#define ARASAN_PRESENT_STATE_CARD_STABLE	0x00020000
+#define ARASAN_PRESENT_STATE_CARD_PRESENT	0x00010000
+#define ARASAN_PRESENT_STATE_BUFFER_RD_EN	0x00000800
+#define ARASAN_PRESENT_STATE_BUFFER_WR_EN	0x00000400
+#define ARASAN_PRESENT_STATE_RD_ACTIVE		0x00000200
+#define ARASAN_PRESENT_STATE_WR_ACTIVE		0x00000100
+#define ARASAN_PRESENT_STATE_DAT_ACTIVE		0x00000004
+#define ARASAN_PRESENT_STATE_DAT_INHIBIT	0x00000002
+#define ARASAN_PRESENT_STATE_CMD_INHIBIT	0x00000001
+
+/* Block size register defines */
+#define ARASAN_BLOCK_SIZE_SDMA_512KB	0x7000
+#define ARASAN_BLOCK_SIZE_SDMA_256KB	0x6000
+#define ARASAN_BLOCK_SIZE_SDMA_128KB	0x5000
+#define ARASAN_BLOCK_SIZE_SDMA_64KB	0x4000
+#define ARASAN_BLOCK_SIZE_SDMA_32KB	0x3000
+#define ARASAN_BLOCK_SIZE_SDMA_16KB	0x2000
+#define ARASAN_BLOCK_SIZE_SDMA_8KB	0x1000
+#define ARASAN_BLOCK_SIZE_SDMA_4KB	0x0000
+#define ARASAN_BLOCK_SIZE_TRANSFER	0x0fff
+
+/* ADMA Error Status Register */
+#define ARASAN_ADMA_ERROR_LENGTH	0x04
+#define ARASAN_ADMA_ERROR_ST_TFR	0x03
+#define ARASAN_ADMA_ERROR_ST_FDS	0x01
+#define ARASAN_ADMA_ERROR_ST_STOP	0x00
+#endif
diff -urN linux-3.0.101/drivers/mmc/host/Kconfig linux-3.0.101.xm510/drivers/mmc/host/Kconfig
--- linux-3.0.101/drivers/mmc/host/Kconfig	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mmc/host/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -429,6 +429,24 @@
 
 endchoice
 
+config MMC_ARASAN
+	tristate "Arasan MMC/SD/SDIO host driver"
+	help
+	  This selects the Arasan MMC/SD/SDIO host controller integrated
+	  in the xm510 platform.
+
+config MMC_WIFI
+	tristate "WIFI-W8782"
+	help
+	  This selects the WIFI-W8782 integrated
+	  in the xm510 platform.
+
+config MMC_SD
+	tristate "SD-CARD"
+	help
+	  This selects the SD-CARD integrated
+	  in the xm510 platform.
+
 config MMC_SDRICOH_CS
 	tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && PCI && PCMCIA
diff -urN linux-3.0.101/drivers/mmc/host/Makefile linux-3.0.101.xm510/drivers/mmc/host/Makefile
--- linux-3.0.101/drivers/mmc/host/Makefile	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mmc/host/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -40,6 +40,9 @@
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
 obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
+obj-$(CONFIG_MMC_ARASAN)	+= arasan.o
+obj-$(CONFIG_MMC_WIFI)		+= sdio2_wifi.o
+obj-$(CONFIG_MMC_SD)		+= sdio1_sd.o
 obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
 obj-$(CONFIG_MMC_VUB300)	+= vub300.o
 obj-$(CONFIG_MMC_USHC)		+= ushc.o
diff -urN linux-3.0.101/drivers/mmc/host/sdio1_sd.c linux-3.0.101.xm510/drivers/mmc/host/sdio1_sd.c
--- linux-3.0.101/drivers/mmc/host/sdio1_sd.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mmc/host/sdio1_sd.c	2017-09-11 14:47:37.000000000 +0300
@@ -0,0 +1,180 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mbus.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/irq.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/arasan_plat.h>
+
+#include <asm/sizes.h>
+#include <asm/unaligned.h>
+
+#include <mach/hardware.h>
+
+#include "arasan.h"
+
+static int detect = 0;
+static unsigned int value = 0x14;
+module_param(value, int, S_IRUSR);
+module_param(detect, int, S_IRUSR);
+
+static void arasan_release(struct device *device)
+{
+	return;
+}
+
+static struct resource arasan1_resource[] = {
+	[0] = {
+		.name = "sdio1_addr",
+		.start = SDIO1_BASE,
+		.end = SDIO1_BASE + 0x10000 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.name = "sdio1_irq",
+		.start = SDIO1_IRQ,
+		.end = SDIO1_IRQ,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+#ifndef GPIO_BASE
+	#define GPIO_BASE (0x10020000)
+#endif
+#ifndef GPIO5_MULT_USE_EN
+	#define GPIO5_MULT_USE_EN (0x14)
+#endif
+
+int GpioValueRead(unsigned int value)
+{
+	unsigned int PortIndex = (value & 0xF0) >> 4;
+	unsigned int BitIndex = value & 0x0F;
+	unsigned int gpio_dir_value = 0;
+	unsigned int gpio_value = 0;
+	unsigned int gpio_bitdata_value = 0;
+	//printk("PortIndex = %x BitIndex = %x\n", PortIndex, BitIndex);
+	if (PortIndex > 6) {
+		printk("error : PortIndex = %d\n", PortIndex);
+		return -1;
+	}
+	gpio_dir_value = readl(IO_ADDRESS(GPIO_BASE + PortIndex * 0x04));
+	//printk("@@@@gpio_dir_value = %x@@@\n", gpio_dir_value);
+	gpio_dir_value &= ~(1 << BitIndex | 1 << (8 + BitIndex) | 1 << (16 + BitIndex));
+	//printk("!!!!gpio_dir_value = %x!!! \n", gpio_dir_value);
+	writel(gpio_dir_value, IO_ADDRESS(GPIO_BASE + PortIndex * 0x04));
+	//----------------------------------------
+	/* enable */
+	gpio_bitdata_value = readl(IO_ADDRESS(GPIO_BASE + 0x44 + PortIndex * 0x10));
+	gpio_bitdata_value |= (1 << BitIndex);
+	writel(gpio_bitdata_value, IO_ADDRESS(GPIO_BASE + 0x44 + PortIndex * 0x10));
+	/* configure */
+	gpio_value = readl(IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	gpio_value &= ~(1 << BitIndex);
+	writel(gpio_value, IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	return 0;
+}
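+/*
+ * Illustrative decode (editorial, not part of the original source): the
+ * "value" module parameter packs port and bit as 0xPB, so the default
+ * value = 0x14 selects GPIO port 1, bit 4; the function above then sets
+ * that pin's direction, enable and level, presumably for the card-detect
+ * line (arasan1_init() only calls it when detect is set).
+ */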
+
+/* Mux the GPIOs over to sdio1 */
+void arasan1_powerup(void)
+{
+	unsigned int gpio_5_reuse, gpio_5_reuse_value;
+	void __iomem *gpio_5_resue_addr = NULL;
+	int ret = 0;
+
+	gpio_5_reuse = GPIO_BASE + GPIO5_MULT_USE_EN;
+	if (!request_mem_region(gpio_5_reuse, 4, "gpio5_sdio1")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio_5_reuse);
+		return;
+	}
+	gpio_5_resue_addr = ioremap(gpio_5_reuse, 4);
+	if (!gpio_5_resue_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+	/* Route the pins to sdio1 */
+	gpio_5_reuse_value = readl(gpio_5_resue_addr);
+	gpio_5_reuse_value |= 0xff;
+	if (detect) {
+		gpio_5_reuse_value &= ~(0x1 << 6); /* open hot plug */
+		//printk("open hot plug\n");
+	} else {
+		/* no detect, no power save, no write protect, no LED */
+		gpio_5_reuse_value &= ~(0xf << 4);
+		//printk("no hot plug\n");
+	}
+	writel(gpio_5_reuse_value, gpio_5_resue_addr);
+	//gpio_5_reuse_value = readl(gpio_5_resue_addr);
+
+	iounmap(gpio_5_resue_addr);
+
+out_release_region:
+	release_mem_region(gpio_5_reuse, 4);
+	return;
+}
+
+struct arasan_platform_data arasan1_platform_data = {
+	.need_poll = 0,
+	.need_detect = 0,
+	.use_pio = 0,
+	.auto_cmd12 = 1,
+	.card_irq = 0,
+	.sdio_4bit_data = 1,
+
+	.p_powerup = arasan1_powerup,
+};
+
+/* sdio 1 */
+static struct platform_device arasan1_device = {
+	.id = 1,
+	.name = ARASAN_DRIVER_NAME,
+	.num_resources = ARRAY_SIZE(arasan1_resource),
+	.resource = arasan1_resource,
+	.dev = {
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &arasan1_platform_data,
+		.release = arasan_release,
+	}
+};
+
+static int __init arasan1_init(void)
+{
+	if (detect) {
+		GpioValueRead(value);
+		arasan1_platform_data.need_detect = 1;
+	}
+	platform_device_register(&arasan1_device);
+
+	return 0;
+}
+
+static void __exit arasan1_exit(void)
+{
+	platform_device_unregister(&arasan1_device);
+}
+
+module_init(arasan1_init);
+module_exit(arasan1_exit);
+
+MODULE_LICENSE("GPL");
+
diff -urN linux-3.0.101/drivers/mmc/host/sdio2_wifi.c linux-3.0.101.xm510/drivers/mmc/host/sdio2_wifi.c
--- linux-3.0.101/drivers/mmc/host/sdio2_wifi.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mmc/host/sdio2_wifi.c	2017-09-11 14:47:37.000000000 +0300
@@ -0,0 +1,273 @@
+/*
+ * Arasan MMC/SD/SDIO driver
+ *
+ * This is the driver for the Arasan MMC/SD/SDIO host controller
+ * integrated in the STMicroelectronics platforms
+ *
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@xxxxxx>
+ * Copyright (C) 2010 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mbus.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/irq.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/arasan_plat.h>
+
+#include <asm/sizes.h>
+#include <asm/unaligned.h>
+
+#include <mach/hardware.h>
+
+#include "arasan.h"
+
+static int value = 0x54;
+module_param(value, int, S_IRUSR);
+
+static struct resource arasan2_resource[] = {
+	[0] = {
+		.name = "sdio2_addr",
+		.start = SDIO2_BASE,
+		.end = SDIO2_BASE + 0x10000 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.name = "sdio2_irq",
+		.start = SDIO2_IRQ,
+		.end = SDIO2_IRQ,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+struct resource *port0_r;
+unsigned int port0_addr;
+
+static void arasan_release(struct device *device)
+{
+	return;
+}
+
+#ifndef GPIO_BASE
+	#define GPIO_BASE (0x10020000)
+#endif
+#ifndef GPIO6_MULT_USE_EN
+	#define GPIO6_MULT_USE_EN (0x18)
+#endif
+
+void gpio6_reuse_sdio2(void)
+{
+	unsigned int gpio_6_reuse, gpio_6_reuse_value;
+	void __iomem *gpio_6_resue_addr = NULL;
+	int ret = 0;
+
+	gpio_6_reuse = GPIO_BASE + GPIO6_MULT_USE_EN;
+	if (!request_mem_region(gpio_6_reuse, 4, "gpio6_sdio2")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio_6_reuse);
+		return;
+	}
+	gpio_6_resue_addr = ioremap(gpio_6_reuse, 4);
+	if (!gpio_6_resue_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+	/* Route the pins to sdio2 */
+	//gpio_6_reuse_value = readl(gpio_6_resue_addr);
+	//gpio_6_reuse_value = 0xff;
+	//gpio_6_reuse_value &= ~(0x5<<4); /* sdio1-cdtct and sd1-wptct left unmuxed */
+	gpio_6_reuse_value = 0xf;
+	writel(gpio_6_reuse_value, gpio_6_resue_addr);
+	gpio_6_reuse_value = readl(gpio_6_resue_addr);
+	//printk("value=%02x\n", gpio_6_reuse_value);
+
+	iounmap(gpio_6_resue_addr);
+
+out_release_region:
+	release_mem_region(gpio_6_reuse, 4);
+	return;
+}
+
+void gpio6_46_outlow(void)
+{
+	unsigned int gpio_6, gpio_6_value;
+	void __iomem *gpio_6_addr = NULL;
+	int ret = 0;
+
+	gpio_6 = GPIO_BASE + 0xA0;
+	if (!request_mem_region(gpio_6, 12, "gpio6")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio_6);
+		return;
+	}
+	gpio_6_addr = ioremap(gpio_6, 12);
+	if (!gpio_6_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	/* Output-enable gpio_6_4 and gpio_6_6 */
+	gpio_6_value = readl(gpio_6_addr + 4);
+	gpio_6_value |= 0x5 << 4;
+	writel(gpio_6_value, gpio_6_addr + 4);
+
+	/* Pulse gpio_6_6: drive it low, then high */
+	gpio_6_value = readl(gpio_6_addr);
+	//gpio_6_value &= ~(0x5<<4);
+	gpio_6_value &= ~(0x1 << 6);
+	//gpio_6_value &= ~(0x1<<4);
+	writel(gpio_6_value, gpio_6_addr);
+	mdelay(100);
+	gpio_6_value |= 0x1 << 6;
+	writel(gpio_6_value, gpio_6_addr);
+	mdelay(100);
+
+	iounmap(gpio_6_addr);
+
+out_release_region:
+	release_mem_region(gpio_6, 12);
+	return;
+}
+
+void gpio5_6_input(void)
+{
+	unsigned int gpio_5, gpio_5_value;
+	void __iomem *gpio_5_addr = NULL;
+	int ret = 0;
+
+	gpio_5 = GPIO_BASE + 0x90;
+	if (!request_mem_region(gpio_5, 12, "gpio5")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio_5);
+		return;
+	}
+	gpio_5_addr = ioremap(gpio_5, 12);
+	if (!gpio_5_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	/* Configure gpio_5_6 as an input */
+	gpio_5_value = 0;
+	gpio_5_value = readl(gpio_5_addr + 4);
+	gpio_5_value &= ~(0x1 << 6);
+	writel(gpio_5_value, gpio_5_addr + 4);
+
+	iounmap(gpio_5_addr);
+
+out_release_region:
+	release_mem_region(gpio_5, 12);
+	return;
+}
+
+int wifi_powerup(unsigned int value)
+{
+	unsigned int PortIndex = (value & 0xF0) >> 4;
+	unsigned int BitIndex = value & 0x0F;
+	unsigned int gpio_dir_value = 0;
+	unsigned int gpio_value = 0;
+	unsigned int gpio_bitdata_value = 0;
+
+	//printk("PortIndex = %x BitIndex = %x\n", PortIndex, BitIndex);
+
+	if (PortIndex > 6) {
+		printk("error : PortIndex = %d\n", PortIndex);
+		return -1;
+	}
+	/* direction */
+	gpio_dir_value = readl(IO_ADDRESS(GPIO_BASE + PortIndex * 0x04));
+	//printk("@@@@gpio_dir_value = %x@@@\n", gpio_dir_value);
+	gpio_dir_value &= ~(1 << BitIndex | 1 << (8 + BitIndex) | 1 << (16 + BitIndex));
+	//printk("!!!!gpio_dir_value = %x!!! \n", gpio_dir_value);
+	writel(gpio_dir_value, IO_ADDRESS(GPIO_BASE + PortIndex * 0x04));
+	/* enable */
+	gpio_bitdata_value = readl(IO_ADDRESS(GPIO_BASE + 0x44 + PortIndex * 0x10));
+	gpio_bitdata_value |= (1 << BitIndex);
+	writel(gpio_bitdata_value, IO_ADDRESS(GPIO_BASE + 0x44 + PortIndex * 0x10));
+	/* configure: pull the pin low first, then high */
+	gpio_value = readl(IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	gpio_value &= ~(1 << BitIndex);
+	writel(gpio_value, IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	mdelay(100);
+
+	gpio_value = readl(IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	gpio_value |= (1 << BitIndex);
+	writel(gpio_value, IO_ADDRESS(GPIO_BASE + 0x40 + PortIndex * 0x10));
+	mdelay(100);
+
+	return 0;
+}
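+/*
+ * Illustrative decode (editorial, not part of the original source): with
+ * the default value = 0x54 and the same 0xPB packing used in sdio1_sd.c,
+ * this drives GPIO port 5, bit 4 (the WiFi PDN line mentioned in
+ * arasan_probe()) low for 100 ms and then high for 100 ms, resetting the
+ * module before the host probes it.
+ */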
+
+/* Mux the GPIOs over to sdio2 */
+void arasan2_powerup(void)
+{
+	gpio6_reuse_sdio2();
+	mdelay(100);
+	//gpio5_6_input();
+	wifi_powerup(value);
+}
+
+struct arasan_platform_data arasan2_platform_data = {
+	.need_poll = 0,
+	.need_detect = 0,
+	.use_pio = 0,
+	.auto_cmd12 = 0,
+	.card_irq = 1,
+	.sdio_4bit_data = 1,
+
+	.p_powerup = arasan2_powerup,
+};
+/* sdio 2 */
+static struct platform_device arasan2_device = {
+	//.id = -1,
+	.name = ARASAN_DRIVER_NAME,
+	.num_resources = ARRAY_SIZE(arasan2_resource),
+	.resource = arasan2_resource,
+	.dev = {
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &arasan2_platform_data,
+		.release = arasan_release,
+	}
+};
+
+static int __init arasan2_init(void)
+{
+	platform_device_register(&arasan2_device);
+	return 0;
+}
+
+static void __exit arasan2_exit(void)
+{
+	printk("arasan2_exit.\n");
+	platform_device_unregister(&arasan2_device);
+}
+
+module_init(arasan2_init);
+module_exit(arasan2_exit);
+
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/mtd/devices/Kconfig linux-3.0.101.xm510/drivers/mtd/devices/Kconfig
--- linux-3.0.101/drivers/mtd/devices/Kconfig	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -305,4 +305,5 @@
 	  LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
 	  you have managed to wipe the first block.
 
+source "drivers/mtd/devices/xmsfc/Kconfig"
 endmenu
diff -urN linux-3.0.101/drivers/mtd/devices/Makefile linux-3.0.101.xm510/drivers/mtd/devices/Makefile
--- linux-3.0.101/drivers/mtd/devices/Makefile	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -17,3 +17,5 @@
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
 obj-$(CONFIG_MTD_SST25L)	+= sst25l.o
+
+obj-y				+= xmsfc/
diff -urN linux-3.0.101/drivers/mtd/devices/xmsfc/Kconfig linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/Kconfig
--- linux-3.0.101/drivers/mtd/devices/xmsfc/Kconfig	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/Kconfig	2016-07-14 08:38:29.000000000 +0300
@@ -0,0 +1,8 @@
+config MTD_XMSFC
+	tristate "xm spi flash controller device driver"
+	depends on ARCH_XM510 || ARCH_XM520
+	default y if ARCH_XM510 || ARCH_XM520
+	help
+	  xm spi flash controller device driver
+
+
diff -urN linux-3.0.101/drivers/mtd/devices/xmsfc/Makefile linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/Makefile
--- linux-3.0.101/drivers/mtd/devices/xmsfc/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,4 @@
+
+obj-$(CONFIG_MTD_XMSFC) += xmsfc.o xmsfc_params.o
+
+
diff -urN linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc.c linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc.c
--- linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,370 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <asm/setup.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "xmsfc.h"
+
+extern const struct xmsfc_params xmsfc_params_table[];
+extern const struct xmsfc_params xmsfc_params_default;
+
+static int xmsfc_reg_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	unsigned long long offset = instr->addr;
+	unsigned long long length = instr->len;
+	unsigned int timeout = 0x10000000;
+	struct xmsfc_host *host = MTD_TO_HOST(mtd);
+
+	if (offset + length > mtd->size) {
+		DBG_MSG("erase area out of range of mtd.\n");
+		return -EINVAL;
+	}
+
+	if ((unsigned int)offset & (mtd->erasesize - 1)) {
+		DBG_MSG("erase start address is not aligned.\n");
+		return -EINVAL;
+	}
+
+	if ((unsigned int)length & (mtd->erasesize - 1)) {
+		DBG_MSG("erase length is not aligned.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&host->lock);
+	while (length) {
+		writel(offset, XMSFC_REG_ADDR);
+		writel(host->cmd_erase, XMSFC_REG_CMD);
+		writel(XMSFC_OP2, XMSFC_REG_OP);
+
+		while ((readl(XMSFC_REG_ST) & XMSFC_OP2_OK) == 0) {
+			if (--timeout == 0) {
+				instr->state = MTD_ERASE_FAILED;
+				mutex_unlock(&host->lock);
+				return -EIO;
+			}
+		}
+
+		/* this command is for the MPW silicon */
+		writel(0x05, XMSFC_REG_CMD);
+		writel(XMSFC_OP3, XMSFC_REG_OP);
+		while ((readl(XMSFC_REG_ST) & XMSFC_OP3_OK) == 0) {
+			if (--timeout == 0) {
+				instr->state = MTD_ERASE_FAILED;
+				mutex_unlock(&host->lock);
+				return -EIO;
+			}
+		}
+
+		offset += mtd->erasesize;
+		length -= mtd->erasesize;
+	}
+	instr->state = MTD_ERASE_DONE;
+	mutex_unlock(&host->lock);
+	mtd_erase_callback(instr);
+
+	return 0;
+}
+
+static int xmsfc_reg_write(struct mtd_info *mtd, loff_t to, size_t len,
+			   size_t *retlen, const u_char *buf)
+{
+	struct xmsfc_host *host = MTD_TO_HOST(mtd);
+	unsigned char *ptr = (unsigned char *)buf;
+	int num = 0;
+	int index = 0;
+	int remain = 0;
+
+	if ((to + len) > mtd->size) {
+		DBG_MSG("write data out of range.\n");
+		return -EINVAL;
+	}
+
+	*retlen = 0;
+	if (!len) {
+		DBG_MSG("write length is 0.\n");
+		return 0;
+	}
+	mutex_lock(&host->lock);
+
+	while (len > 0) {
+		remain = (to % XMSFC_REG_BUF_SIZE);
+		num = ((len >= XMSFC_REG_BUF_SIZE - remain) ? XMSFC_REG_BUF_SIZE - remain : len);
+
+		writel(to, XMSFC_REG_ADDR);
+		writel(num - 1, XMSFC_REG_WRNUM);
+
+		index = 0;
+		while (index < num) {
+			writel(*(ptr + index), XMSFC_REG_BUF + 4 * index);
+			index++;
+		}
+		writel(host->cmd_write, XMSFC_REG_CMD);
+		writel(XMSFC_OP6, XMSFC_REG_OP);
+		while ((readl(XMSFC_REG_ST) & XMSFC_OP6_OK) == 0)
+			;
+
+		/* this command is for the MPW silicon */
+		writel(0x05, XMSFC_REG_CMD);
+		writel(XMSFC_OP3, XMSFC_REG_OP);
+		while ((readl(XMSFC_REG_ST) & XMSFC_OP3_OK) == 0)
+			;
+
+		to += num;
+		ptr += num;
+		len -= num;
+	}
+	*retlen = (size_t)(ptr - buf);
+
+	mutex_unlock(&host->lock);
+	return 0;
+}
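+/*
+ * Worked example (editorial): the write loop above chunks by the remaining
+ * room in the controller's write buffer. Assuming, hypothetically, that
+ * XMSFC_REG_BUF_SIZE is 256 (the real value lives in xmsfc.h), a 300-byte
+ * write starting at to = 0x100080 gives remain = 0x80, so the chunks are
+ * 128 bytes and then 172 bytes, each staying inside one buffer window.
+ */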
+
+static int xmsfc_bus_read(struct mtd_info *mtd, loff_t from, size_t len,
+			  size_t *retlen, u_char *buf)
+{
+	int num;
+	int result = -EIO;
+	unsigned char *ptr = buf;
+	struct xmsfc_host *host = MTD_TO_HOST(mtd);
+
+	if ((from + len) > mtd->size) {
+		DBG_MSG("read area out of range.\n");
+		return -EINVAL;
+	}
+
+	*retlen = 0;
+	if (!len) {
+		DBG_MSG("read length is 0.\n");
+		return 0;
+	}
+
+	mutex_lock(&host->lock);
+
+	while (len > 0) {
+		//num = ((from + len) >= spi->chipsize) ? (spi->chipsize - from) : len;
+		num = len;
+		memcpy(ptr, (char *)host->iobase + from, num);
+		from += num;
+		ptr += num;
+		len -= num;
+	}
+	*retlen = (size_t)(ptr - buf);
+	result = 0;
+
+	mutex_unlock(&host->lock);
+	return result;
+}
+
+int xmsfc_entry_quad_1addr(const struct xmsfc_params *params)
+{
+	unsigned int value = 0;
+
+	/* read status register 2 */
+	writel(XMSFC_CMD_READ_ST2, XMSFC_REG_CMD);
+	writel(0, XMSFC_REG_RW_SR_BSEL);
+	writel(XMSFC_OP3, XMSFC_REG_OP);
+	while ((readl(XMSFC_REG_ST) & XMSFC_OP3_OK) == 0)
+		;
+	value = readl(XMSFC_REG_SRR);
+
+	writel((value << 8) | 0x0200, XMSFC_REG_SRW);
+	writel(XMSFC_CMD_WRITE_ST, XMSFC_REG_CMD);
+	writel(1, XMSFC_REG_RW_SR_BSEL);
+	writel(XMSFC_OP4, XMSFC_REG_OP);
+	while ((readl(XMSFC_REG_ST) & XMSFC_OP4_OK) == 0)
+		;
+
+	writel(XMSFC_CMD_READ_ST2, XMSFC_REG_CMD);
+	writel(0, XMSFC_REG_RW_SR_BSEL);
+	writel(XMSFC_OP3, XMSFC_REG_OP);
+	while ((readl(XMSFC_REG_ST) & XMSFC_OP3_OK) == 0)
+		;
+	value = readl(XMSFC_REG_SRR);
+
+	if ((value & 0x02) == 0) {
+		return -1;
+	}
+
+	writel(params->cmd_read, XMSFC_REG_CACHE_CMD);
+	writel(0x00, XMSFC_REG_MODE);
+	writel(0x01, XMSFC_REG_MODE);
+	while ((readl(XMSFC_REG_MODE_ST) & 0x01) == 0)
+		;
+
+	return 0;
+}
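+/*
+ * Note (editorial reading of the sequence above): this appears to set the
+ * flash's Quad Enable bit. It reads status register 2, writes it back with
+ * the 0x0200 bit set in the combined value, re-reads SR2 and bails out
+ * unless bit 1 (0x02) is now set, then programs the controller's read
+ * cache with the quad read command from the params table.
+ */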
|
||
+
|
||
+
|
||
+
|
||
+static int xmsfc_read_cmd(u8 cmd, void *data, size_t data_len)
|
||
+{
|
||
+ if(cmd == XMSFC_CMD_READ_JEDECID)
|
||
+ {
|
||
+ writel(XMSFC_CMD_READ_JEDECID, XMSFC_REG_CMD);
|
||
+ writel(2, XMSFC_REG_RW_SR_BSEL);
|
||
+ writel(XMSFC_OP3, XMSFC_REG_OP);
|
||
+
|
||
+ udelay(1);
|
||
+ while((readl(XMSFC_REG_ST) & XMSFC_OP3_OK) == 0);
|
||
+
|
||
+ *((u32*)data) = readl(XMSFC_REG_SRR) & 0x00FFFFFF;
|
||
+ }
|
||
+ return 0;
|
||
+}
|
||
+
|
||
+static int xmsfc_driver_probe(struct platform_device * plat_dev)
|
||
+{
|
||
+ struct mtd_info *mtd;
|
||
+ int nr_parts = 0;
|
||
+ struct xmsfc_host *host;
|
||
+ const struct xmsfc_params *params;
|
||
+ struct mtd_partition *parts = NULL;
|
||
+ static char const *part_probes[] = {
|
||
+ "cmdlinepart",
|
||
+ NULL,
|
||
+ };
|
||
+
|
||
+ unsigned int jedec = 0;
|
||
+
|
||
+ if(xmsfc_read_cmd(XMSFC_CMD_READ_JEDECID, &jedec, sizeof(jedec)))
|
||
+ {
|
||
+ printk(KERN_ERR "XMSFC: Failed to get idcodes\n");
|
||
+ return -ENODEV;
|
||
+ }
|
||
+
|
||
+ params = xmsfc_params_table;
|
||
+ for (; params->name != NULL; params++)
|
||
+ {
|
||
+ if ((params->jedec) == jedec)
|
||
+ break;
|
||
+ }
|
||
+ if (!params->name)
|
||
+ {
|
||
+		printk(KERN_WARNING "XMSFC: Unsupported flash IDs: %#x, using default\n", jedec);
+ params = &xmsfc_params_default;
+	}
+	else
+	{
+		printk(KERN_INFO "XMSFC: flash name:%s size:%#x\n", params->name, params->size);
+	}
+
+	if((params->flags & FLAG_QUAD_ONE_ADDR) == 1)
+	{
+		if(xmsfc_entry_quad_1addr(params))
+			return -ENODEV;
+		printk(KERN_INFO "XMSFC: flash entry quad one addr\n");
+	}
+
+	host = kmalloc(sizeof(struct xmsfc_host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+	memset(host, 0, sizeof(struct xmsfc_host));
+	platform_set_drvdata(plat_dev, host);
+
+	host->iobase = ioremap_nocache(XMSF_BASE_ADDR, XMSF_BASE_LEN);
+	if (!host->iobase) {
+		printk(KERN_ERR "spi buffer ioremap failed.\n");
+		goto fail;
+	}
+	host->cmd_erase = params->cmd_erase;
+	host->cmd_write = params->cmd_write;
+	host->cmd_read = params->cmd_read;
+
+	mutex_init(&host->lock);
+	mtd = host->mtd;
+	mtd->name = (char *)plat_dev->name;
+	mtd->type = MTD_NORFLASH;
+	mtd->writesize = 1;
+	mtd->flags = MTD_CAP_NORFLASH;
+	mtd->owner = THIS_MODULE;
+	mtd->erase = xmsfc_reg_erase;
+	mtd->write = xmsfc_reg_write;
+	mtd->read = xmsfc_bus_read;
+	mtd->size = params->size;
+	mtd->erasesize = params->erasesize;
+
+	nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
+
+	return mtd_device_register(mtd, parts, nr_parts) == 1 ? -ENODEV : 0;
+
+fail:
+	if (host->iobase)
+		iounmap(host->iobase);
+	kfree(host);
+	platform_set_drvdata(plat_dev, NULL);
+	return -EIO;
+}
+static int xmsfc_driver_remove(struct platform_device * plat_dev)
+{
+	struct xmsfc_host *host = platform_get_drvdata(plat_dev);
+
+	mtd_device_unregister(host->mtd);
+
+	if (host->iobase)
+		iounmap(host->iobase);
+
+	kfree(host);
+	platform_set_drvdata(plat_dev, NULL);
+
+	return 0;
+}
+
+static void xmsfc_driver_shutdown(struct platform_device *pltdev)
+{
+
+}
+
+static struct platform_driver xmsfc_driver_pltdrv = {
+	.probe		= xmsfc_driver_probe,
+	.remove		= xmsfc_driver_remove,
+	.shutdown	= xmsfc_driver_shutdown,
+	.driver.name	= "xm_sfc",
+	.driver.owner	= THIS_MODULE,
+	.driver.bus	= &platform_bus_type,
+};
+
+static struct platform_device xmsfc_device_pltdev = {
+	.name		= "xm_sfc",
+	.id		= -1,
+};
+
+static int __init xmsfc_module_init(void)
+{
+	int result = 0;
+
+	printk(KERN_DEBUG "XM Spi Flash Controller Device Driver Version 1.0\n");
+
+	result = platform_driver_register(&xmsfc_driver_pltdrv);
+	if (result < 0)
+		return result;
+
+	result = platform_device_register(&xmsfc_device_pltdev);
+	if (result < 0) {
+		platform_driver_unregister(&xmsfc_driver_pltdrv);
+		return result;
+	}
+
+	return result;
+}
+
+static void __exit xmsfc_module_exit(void)
+{
+	platform_device_unregister(&xmsfc_device_pltdev);
+	platform_driver_unregister(&xmsfc_driver_pltdrv);
+}
+
+module_init(xmsfc_module_init);
+module_exit(xmsfc_module_exit);
+
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc.h linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc.h
--- linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,102 @@
+#ifndef _XMSFC_H_
+#define _XMSFC_H_
+
+/*****************************************************************************/
+
+#define _1K	(0x400)
+#define _2K	(0x800)
+
+#define _4K	(0x1000)
+#define _8K	(0x2000)
+#define _16K	(0x4000)
+#define _32K	(0x8000)
+
+#define _64K	(0x10000)
+#define _128K	(0x20000)
+#define _256K	(0x40000)
+#define _512K	(0x80000)
+
+#define _1M	(0x100000)
+#define _2M	(0x200000)
+#define _4M	(0x400000)
+#define _8M	(0x800000)
+
+#define _16M	(0x1000000)
+#define _32M	(0x2000000)
+#define _64M	(0x4000000)
+
+#define INFINITE	(0xFFFFFFFF)
+/*****************************************************************************/
+
+#define XMSFC_REG_BASE_ADDR	IO_ADDRESS(0x100F0000)
+
+#define XMSFC_REG_MODE		(XMSFC_REG_BASE_ADDR + 0x00)
+#define XMSFC_REG_ADDR		(XMSFC_REG_BASE_ADDR + 0x04)	/* flash memory address register */
+#define XMSFC_REG_OP		(XMSFC_REG_BASE_ADDR + 0x08)	/* operation register */
+#define XMSFC_REG_CMD		(XMSFC_REG_BASE_ADDR + 0x0C)	/* command register */
+#define XMSFC_REG_SRW		(XMSFC_REG_BASE_ADDR + 0x10)	/* flash status register write */
+#define XMSFC_REG_WRNUM		(XMSFC_REG_BASE_ADDR + 0x14)	/* flash write count */
+#define XMSFC_REG_RW_SR_BSEL	(XMSFC_REG_BASE_ADDR + 0x18)	/* flash status register read/write byte select */
+#define XMSFC_REG_DOUT		(XMSFC_REG_BASE_ADDR + 0x34)	/* data-to-flash register */
+#define XMSFC_REG_ST		(XMSFC_REG_BASE_ADDR + 0x38)	/* flash operation status register */
+#define XMSFC_REG_SRR		(XMSFC_REG_BASE_ADDR + 0x3C)	/* flash status register read */
+#define XMSFC_REG_MODE_ST	(XMSFC_REG_BASE_ADDR + 0x4C)
+#define XMSFC_REG_CACHE_CMD	(XMSFC_REG_BASE_ADDR + 0x80)
+
+#define XMSFC_REG_BUF		(XMSFC_REG_BASE_ADDR + 0x400)	/* write data buffer */
+#define XMSFC_REG_BUF_SIZE	256
+#define XMSFC_REG_BASE_LEN	0x800
+
+#define XMSF_BASE_ADDR		0
+#define XMSF_BASE_LEN		0x01000000	/* 16MB */
+
+#define XMSFC_OP1_OK	0x01
+#define XMSFC_OP2_OK	0x02
+#define XMSFC_OP3_OK	0x04
+#define XMSFC_OP4_OK	0x08
+#define XMSFC_OP5_OK	0x10
+#define XMSFC_OP6_OK	0x20
+
+// XMSFC operation commands
+#define XMSFC_OP1	0x01
+#define XMSFC_OP2	0x02
+#define XMSFC_OP3	0x03
+#define XMSFC_OP4	0x04
+#define XMSFC_OP5	0x05
+#define XMSFC_OP6	0x06
+
+/*#define XMSFC_CMD_WRITE_DATA	0x02*/
+/*#define XMSFC_CMD_READ_DATA	0x03*/
+#define XMSFC_CMD_READ_JEDECID	0x9F
+#define XMSFC_CMD_WRITE_ST	0x01
+#define XMSFC_CMD_READ_ST2	0x35
+
+
+#define DBG_MSG(_fmt, arg...)	printk(KERN_INFO "%s(%d): " _fmt, __FILE__, __LINE__, ##arg);
+
+#define MTD_TO_HOST(_mtd)	((struct xmsfc_host *)(_mtd))
+
+#define FLAG_QUAD_ONE_ADDR	0x01
+
+struct xmsfc_host {
+	struct mtd_info	mtd[1];
+	void __iomem	*iobase;
+	struct mutex	lock;
+	unsigned char	cmd_erase;
+	unsigned char	cmd_write;
+	unsigned char	cmd_read;
+};
+
+struct xmsfc_params {
+	const char	*name;
+	unsigned int	jedec;
+	unsigned int	size;
+	unsigned int	erasesize;
+	unsigned char	cmd_erase;
+	unsigned char	cmd_write;
+	unsigned char	cmd_read;
+	unsigned char	flags;
+};
+
+
+#endif
diff -urN linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc_params.c linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc_params.c
--- linux-3.0.101/drivers/mtd/devices/xmsfc/xmsfc_params.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/mtd/devices/xmsfc/xmsfc_params.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,12 @@
+#include <linux/mtd/mtd.h>
+#include "xmsfc.h"
+
+const struct xmsfc_params xmsfc_params_table[] = {
+	{"W25Q64CV",   0xef4017, _8M, _64K, 0xD8, 0x02, 0x03, 0},
+	//{"W25Q64CV",   0xef4017, _8M, _64K, 0xD8, 0x32, 0x6B, FLAG_QUAD_ONE_ADDR},
+	{"MX25L6405D", 0xc22017, _8M, _64K, 0xD8, 0x02, 0x03, 0},
+	{NULL, 0, 0, 0, 0, 0, 0, 0},	/* sentinel: the probe loop scans until name == NULL */
+};
+const struct xmsfc_params xmsfc_params_default = {
+	"DEFAULT", 0x000000, _8M, _64K, 0xD8, 0x02, 0x03, 0
+};
diff -urN linux-3.0.101/drivers/net/Kconfig linux-3.0.101.xm510/drivers/net/Kconfig
--- linux-3.0.101/drivers/net/Kconfig	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/Kconfig	2016-05-17 09:52:17.000000000 +0300
@@ -2540,7 +2540,7 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called s6gmac.
 
-source "drivers/net/stmmac/Kconfig"
+source "drivers/net/xmmac/Kconfig"
 
 config PCH_GBE
 	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
diff -urN linux-3.0.101/drivers/net/Makefile linux-3.0.101.xm510/drivers/net/Makefile
--- linux-3.0.101/drivers/net/Makefile	2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -107,7 +107,7 @@
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
 obj-$(CONFIG_RIONET) += rionet.o
 obj-$(CONFIG_SH_ETH) += sh_eth.o
-obj-$(CONFIG_STMMAC_ETH) += stmmac/
+obj-$(CONFIG_XMMAC_ETH) += xmmac/
 
 #
 # end link order section
diff -urN linux-3.0.101/drivers/net/xmmac/common.h linux-3.0.101.xm510/drivers/net/xmmac/common.h
--- linux-3.0.101/drivers/net/xmmac/common.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/common.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,250 @@
+/*******************************************************************************
+  STMMAC Common Header File
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...)  do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_extra_stats {
+ * Adited by ZengChuanJie
+	 * Edited by ZengChuanJie
+ /* Transmit errors */
+	/* Transmit errors */
+	unsigned long tx_underflow ____cacheline_aligned;
+	unsigned long tx_carrier;
+	unsigned long tx_losscarrier;
+	unsigned long tx_deferred;
+	unsigned long tx_vlan;
+	unsigned long tx_jabber;
+	unsigned long tx_frame_flushed;
+	unsigned long tx_payload_error;
+	unsigned long tx_ip_header_error;
+	/* Receive errors */
+	unsigned long rx_desc;
+	unsigned long rx_collision;
+	unsigned long rx_crc;
+	unsigned long rx_length;
+	unsigned long rx_gmac_overflow;
+	unsigned long rx_watchdog;
+	unsigned long da_rx_filter_fail;
+	unsigned long sa_rx_filter_fail;
+	unsigned long rx_missed_cntr;
+	unsigned long rx_overflow_cntr;
+	unsigned long rx_vlan;
+	/******* end *****************/
+	/* Tx/Rx IRQ errors */
+	unsigned long tx_undeflow_irq;
+	unsigned long tx_process_stopped_irq;
+	unsigned long tx_jabber_irq;
+	unsigned long rx_overflow_irq;
+	unsigned long rx_buf_unav_irq;
+	unsigned long rx_process_stopped_irq;
+	unsigned long rx_watchdog_irq;
+	unsigned long tx_early_irq;
+	unsigned long fatal_bus_error_irq;
+	/* Extra info */
+	unsigned long threshold;
+	unsigned long tx_pkt_n;
+	unsigned long rx_pkt_n;
+	unsigned long poll_n;
+	unsigned long sched_timer_n;
+	unsigned long normal_irq_n;
+};
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF	0
+#define FLOW_RX		1
+#define FLOW_TX		2
+#define FLOW_AUTO	(FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+enum rx_frame_status { /* IPC status */
+	good_frame = 0,
+	discard_frame = 1,
+	csum_none = 2,
+	llc_snap = 4,
+};
+
+enum tx_dma_irq_status {
+	tx_hard_error = 1,
+	tx_hard_error_bump_tc = 2,
+	handle_tx_rx = 3,
+};
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG		0x00000000	/* MAC Control */
+#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
+#define MAC_RNABLE_RX		0x00000004	/* Receiver Enable */
+
+/* MAC Management Counters register */
+#define MMC_CONTROL		0x00000100	/* MMC Control */
+#define MMC_HIGH_INTR		0x00000104	/* MMC High Interrupt */
+#define MMC_LOW_INTR		0x00000108	/* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK	0x0000010c	/* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK	0x00000110	/* MMC Low Interrupt Mask */
+
+#define MMC_CONTROL_MAX_FRM_MASK	0x0003ff8	/* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT	3
+#define MMC_CONTROL_MAX_FRAME		0x7FF
+
+struct stmmac_desc_ops {
+	/* DMA RX descriptor ring initialization */
+	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+			      int disable_rx_ic);
+	/* DMA TX descriptor ring initialization */
+	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+	/* Invoked by the xmit function to prepare the tx descriptor */
+	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+				 int csum_flag);
+	/* Set/get the owner of the descriptor */
+	void (*set_tx_owner) (struct dma_desc *p);
+	int (*get_tx_owner) (struct dma_desc *p);
+	/* Invoked by the xmit function to close the tx descriptor */
+	void (*close_tx_desc) (struct dma_desc *p);
+	/* Clean the tx descriptor as soon as the tx irq is received */
+	void (*release_tx_desc) (struct dma_desc *p);
+	/* Clear interrupt on tx frame completion. When this bit is
+	 * set an interrupt happens as soon as the frame is transmitted */
+	void (*clear_tx_ic) (struct dma_desc *p);
+	/* Last tx segment reports the transmit status */
+	int (*get_tx_ls) (struct dma_desc *p);
+	/* Return the transmit status looking at the TDES1 */
+	int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p, void __iomem *ioaddr);
+	/* Get the buffer size from the descriptor */
+	int (*get_tx_len) (struct dma_desc *p);
+	/* Handle extra events on specific interrupts hw dependent */
+	int (*get_rx_owner) (struct dma_desc *p);
+	void (*set_rx_owner) (struct dma_desc *p);
+	/* Get the receive frame size */
+	int (*get_rx_frame_len) (struct dma_desc *p);
+	/* Return the reception status looking at the RDES1 */
+	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+	/* DMA core initialization */
+	int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+	/* Dump DMA registers */
+	void (*dump_regs) (void __iomem *ioaddr);
+	/* Set tx/rx threshold in the csr6 register
+	 * An invalid value enables the store-and-forward mode */
+	void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+	/* To track extra statistic (if supported) */
+	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+				   void __iomem *ioaddr);
+	void (*enable_dma_transmission) (void __iomem *ioaddr);
+	void (*enable_dma_irq) (void __iomem *ioaddr);
+	void (*disable_dma_irq) (void __iomem *ioaddr);
+	void (*start_tx) (void __iomem *ioaddr);
+	void (*stop_tx) (void __iomem *ioaddr);
+	void (*start_rx) (void __iomem *ioaddr);
+	void (*stop_rx) (void __iomem *ioaddr);
+	int (*dma_interrupt) (void __iomem *ioaddr,
+			      struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+	/* MAC core initialization */
+	void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+	/* Support checksum offload engine */
+	int (*rx_coe) (void __iomem *ioaddr);
+	/* Dump MAC registers */
+	void (*dump_regs) (void __iomem *ioaddr);
+	/* Handle extra events on specific interrupts hw dependent */
+	void (*host_irq_status) (void __iomem *ioaddr);
+	/* Multicast filter setting */
+	void (*set_filter) (struct net_device *dev);
+	/* Flow control setting */
+	void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
+			   unsigned int fc, unsigned int pause_time);
+	/* Set power management mode (e.g. magic frame) */
+	void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+	/* Set/Get Unicast MAC addresses */
+	void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+			       unsigned int reg_n);
+	void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+			       unsigned int reg_n);
+};
+
+struct mac_link {
+	int port;
+	int duplex;
+	int speed;
+};
+
+struct mii_regs {
+	unsigned int addr;	/* MII Address */
+	unsigned int data;	/* MII Data */
+};
+
+struct mac_device_info {
+	const struct stmmac_ops		*mac;
+	const struct stmmac_desc_ops	*desc;
+	const struct stmmac_dma_ops	*dma;
+	struct mii_regs mii;	/* MII register Addresses */
+	struct mac_link link;
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+				unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
diff -urN linux-3.0.101/drivers/net/xmmac/descs.h linux-3.0.101.xm510/drivers/net/xmmac/descs.h
--- linux-3.0.101/drivers/net/xmmac/descs.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/descs.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,167 @@
+/*******************************************************************************
+  Header File to describe the DMA descriptors.
+  Enhanced descriptors are used in the case of DWMAC1000 Cores.
+
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+	/* Receive descriptor */
+	union {
+		struct {
+			/* RDES0 */
+			u32 rx_payload_error:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 mii_error:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 collision:1;
+			u32 IPC:1;	/* IPC Checksum Error or Giant Frame */
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 vlan_tag:1;
+			u32 overflow_error:1;
+			u32 length_error:1;
+			u32 SAF:1;	/* source Address Filter Fail */
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 filtering_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved2:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 reserved3:5;
+			u32 disable_ic:1;
+		} rx;
+		struct {
+			/* RDES0 */
+			u32 payload_csum_error:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 error_gmii:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 late_collision:1;
+			u32 ipc_csum_error:1;
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 vlan_tag:1;
+			u32 overflow_error:1;
+			u32 length_error:1;
+			u32 sa_filter_fail:1;
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 da_filter_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:13;
+			u32 reserved1:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 buffer2_size:13;
+			u32 reserved2:2;
+			u32 disable_ic:1;
+		} erx;		/* -- enhanced -- */
+
+		/* Transmit descriptor */
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 vlan_frame:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 payload_checksum_err:1;
+			u32 frame_flushed:1;
+			u32 jabber_timeout:1;
+			u32 error_summary:1;
+			u32 ip_header_error:1;
+			u32 ttss:1;
+			u32 reserved2:13;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved3:1;
+			u32 disable_padding:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 crc_disable:1;
+			u32 reserved4:2;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+		} tx;
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 vlan_frame:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 payload_error:1;
+			u32 frame_flushed:1;
+			u32 jabber_timeout:1;
+			u32 error_summary:1;
+			u32 ip_header_error:1;
+			u32 time_stamp_status:1;
+			u32 reserved1:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 checksum_insertion:2;
+			u32 reserved2:1;
+			u32 time_stamp_enable:1;
+			u32 disable_padding:1;
+			u32 crc_disable:1;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:13;
+			u32 reserved3:3;
+			u32 buffer2_size:13;
+			u32 reserved4:3;
+		} etx;		/* -- enhanced -- */
+	} des01;
+	unsigned int des2;
+	unsigned int des3;
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+	cic_disabled = 0,	/* Checksum Insertion Control */
+	cic_only_ip = 1,	/* Only IP header */
+	cic_no_pseudoheader = 2,	/* IP header but pseudoheader
+					 * is not calculated */
+	cic_full = 3,		/* IP header and pseudoheader */
+};
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac1000_core.c linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000_core.c
--- linux-3.0.101/drivers/net/xmmac/dwmac1000_core.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000_core.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,250 @@
+/*******************************************************************************
+  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+  developing this code.
+
+  This only implements the mac core functions for this chip.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + GMAC_CONTROL);
+	value |= GMAC_CORE_INIT;
+	writel(value, ioaddr + GMAC_CONTROL);
+
+	/* STBus Bridge Configuration */
+	/*writel(0xc5608, ioaddr + 0x00007000);*/
+
+	/* Freeze MMC counters */
+	writel(0x8, ioaddr + GMAC_MMC_CTRL);
+	/* Mask GMAC interrupts */
+	writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+	/* Tag detection without filtering */
+	writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+}
+
+static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + GMAC_CONTROL);
+
+	value |= GMAC_CONTROL_IPC;
+	writel(value, ioaddr + GMAC_CONTROL);
+
+	value = readl(ioaddr + GMAC_CONTROL);
+
+	return !!(value & GMAC_CONTROL_IPC);
+}
+
+static void dwmac1000_dump_regs(void __iomem *ioaddr)
+{
+	int i;
+	pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
+
+	for (i = 0; i < 55; i++) {
+		int offset = i * 4;
+		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+			offset, readl(ioaddr + offset));
+	}
+}
+
+static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int reg_n)
+{
+	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int reg_n)
+{
+	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+	unsigned int value = 0;
+
+	CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+		 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC)
+		value = GMAC_FRAME_FILTER_PR;
+	else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+		   || (dev->flags & IFF_ALLMULTI)) {
+		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
+		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+	} else if (!netdev_mc_empty(dev)) {
+		u32 mc_filter[2];
+		struct netdev_hw_addr *ha;
+
+		/* Hash filter for multicast */
+		value = GMAC_FRAME_FILTER_HMC;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			   index the contents of the hash table */
+ int bit_nr =
+				bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+	}
+
+	/* Handle multiple unicast addresses (perfect filtering)*/
+	if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+		/* Switch to promiscuous mode if more than 16 addrs
+		   are required */
+ value |= GMAC_FRAME_FILTER_PR;
+	else {
+		int reg = 1;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+#ifdef FRAME_FILTER_DEBUG
+	/* Enable Receive all mode (to debug filtering_fail errors) */
+	value |= GMAC_FRAME_FILTER_RA;
+#endif
+	writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+	CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+}
+
+static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+			   unsigned int fc, unsigned int pause_time)
+{
+	unsigned int flow = 0;
+
+	CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+	if (fc & FLOW_RX) {
+		CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+		flow |= GMAC_FLOW_CTRL_RFE;
+	}
+	if (fc & FLOW_TX) {
+		CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+		flow |= GMAC_FLOW_CTRL_TFE;
+	}
+
+	if (duplex) {
+		CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
+		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+	}
+
+	writel(flow, ioaddr + GMAC_FLOW_CTRL);
+}
+
+static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+	unsigned int pmt = 0;
+
+	if (mode & WAKE_MAGIC) {
+		CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+		pmt |= power_down | magic_pkt_en;
+	}
+	if (mode & WAKE_UCAST) {
+		CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+		pmt |= global_unicast;
+	}
+
+	writel(pmt, ioaddr + GMAC_PMT);
+}
+
+
+static void dwmac1000_irq_status(void __iomem *ioaddr)
+{
+	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+	/* Not used events (e.g. MMC interrupts) are not handled. */
+	if ((intr_status & mmc_tx_irq))
+		CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_TX_INTR));
+	if (unlikely(intr_status & mmc_rx_irq))
+		CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_RX_INTR));
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+		CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+	if (unlikely(intr_status & pmt_irq)) {
+		CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+		/* clear the PMT bits 5 and 6 by reading the PMT
+		 * status register. */
+		readl(ioaddr + GMAC_PMT);
+	}
+}
+
+static const struct stmmac_ops dwmac1000_ops = {
+	.core_init = dwmac1000_core_init,
+	.rx_coe = dwmac1000_rx_coe_supported,
+	.dump_regs = dwmac1000_dump_regs,
+	.host_irq_status = dwmac1000_irq_status,
+	.set_filter = dwmac1000_set_filter,
+	.flow_ctrl = dwmac1000_flow_ctrl,
+	.pmt = dwmac1000_pmt,
+	.set_umac_addr = dwmac1000_set_umac_addr,
+	.get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
+{
+	struct mac_device_info *mac;
+	u32 uid = readl(ioaddr + GMAC_VERSION);
+
+	pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+		((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->mac = &dwmac1000_ops;
+	mac->dma = &dwmac1000_dma_ops;
+
+	mac->link.port = GMAC_CONTROL_PS;
+	mac->link.duplex = GMAC_CONTROL_DM;
+	mac->link.speed = GMAC_CONTROL_FES;
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+
+	return mac;
+}
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac1000_dma.c linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000_dma.c
--- linux-3.0.101/drivers/net/xmmac/dwmac1000_dma.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000_dma.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,154 @@
+/*******************************************************************************
+  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+  developing this code.
+
+  This contains the functions to handle the dma.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
+
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
+			      u32 dma_rx)
+{
+	u32 value = readl(ioaddr + DMA_BUS_MODE);
+	int limit;
+
+	/* DMA SW reset */
+	value |= DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + DMA_BUS_MODE);
+	limit = 15000;
+	while (limit--) {
+		if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+			break;
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
+	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+#ifdef CONFIG_STMMAC_DA
+	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
+#endif
+	writel(value, ioaddr + DMA_BUS_MODE);
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+	/* The base address of the RX/TX descriptor lists must be written into
+	 * DMA CSR3 and CSR4, respectively. */
+	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+	return 0;
+}
+
+static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
+					 int rxmode)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+	if (txmode == SF_DMA_MODE) {
+		CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		csr6 |= DMA_CONTROL_TSF;
+		/* Operating on second frame increase the performance
+		 * especially when transmit store-and-forward is used.*/
+		csr6 |= DMA_CONTROL_OSF;
+	} else {
+		CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+			      " (threshold = %d)\n", txmode);
+		csr6 &= ~DMA_CONTROL_TSF;
+		csr6 &= DMA_CONTROL_TC_TX_MASK;
+		/* Set the transmit threshold */
+		if (txmode <= 32)
+			csr6 |= DMA_CONTROL_TTC_32;
+		else if (txmode <= 64)
+			csr6 |= DMA_CONTROL_TTC_64;
+		else if (txmode <= 128)
+			csr6 |= DMA_CONTROL_TTC_128;
+		else if (txmode <= 192)
+			csr6 |= DMA_CONTROL_TTC_192;
+		else
+			csr6 |= DMA_CONTROL_TTC_256;
+	}
+
+	if (rxmode == SF_DMA_MODE) {
+		CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
+		csr6 |= DMA_CONTROL_RSF;
+	} else {
+		CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+			      " (threshold = %d)\n", rxmode);
+		csr6 &= ~DMA_CONTROL_RSF;
+		csr6 &= DMA_CONTROL_TC_RX_MASK;
+		if (rxmode <= 32)
+			csr6 |= DMA_CONTROL_RTC_32;
+		else if (rxmode <= 64)
+			csr6 |= DMA_CONTROL_RTC_64;
+		else if (rxmode <= 96)
+			csr6 |= DMA_CONTROL_RTC_96;
+		else
+			csr6 |= DMA_CONTROL_RTC_128;
+	}
+
+	writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+/* Not yet implemented --- no RMON module */
+static void dwmac1000_dma_diagnostic_fr(void *data,
+		  struct stmmac_extra_stats *x, void __iomem *ioaddr)
+{
+	return;
+}
+
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
+{
+	int i;
+	pr_info(" DMA registers\n");
+	for (i = 0; i < 22; i++) {
+		if ((i < 9) || (i > 17)) {
+			int offset = i * 4;
+			pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+			       (DMA_BUS_MODE + offset),
+			       readl(ioaddr + DMA_BUS_MODE + offset));
+		}
+	}
+}
+
+const struct stmmac_dma_ops dwmac1000_dma_ops = {
+	.init = dwmac1000_dma_init,
+	.dump_regs = dwmac1000_dump_dma_regs,
+	.dma_mode = dwmac1000_dma_operation_mode,
+	.dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+	.enable_dma_transmission = dwmac_enable_dma_transmission,
+	.enable_dma_irq = dwmac_enable_dma_irq,
+	.disable_dma_irq = dwmac_disable_dma_irq,
+	.start_tx = dwmac_dma_start_tx,
+	.stop_tx = dwmac_dma_stop_tx,
+	.start_rx = dwmac_dma_start_rx,
+	.stop_rx = dwmac_dma_stop_rx,
+	.dma_interrupt = dwmac_dma_interrupt,
+};
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac1000.h linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000.h
--- linux-3.0.101/drivers/net/xmmac/dwmac1000.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac1000.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,208 @@
+/*******************************************************************************
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/phy.h>
+#include "common.h"
+
+#define GMAC_CONTROL		0x00000000	/* Configuration */
+#define GMAC_FRAME_FILTER	0x00000004	/* Frame Filter */
+#define GMAC_HASH_HIGH		0x00000008	/* Multicast Hash Table High */
+#define GMAC_HASH_LOW		0x0000000c	/* Multicast Hash Table Low */
+#define GMAC_MII_ADDR		0x00000010	/* MII Address */
+#define GMAC_MII_DATA		0x00000014	/* MII Data */
+#define GMAC_FLOW_CTRL		0x00000018	/* Flow Control */
+#define GMAC_VLAN_TAG		0x0000001c	/* VLAN Tag */
+#define GMAC_VERSION		0x00000020	/* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER	0x00000028	/* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS		0x00000038	/* interrupt status register */
+enum dwmac1000_irq_status {
+	time_stamp_irq = 0x0200,
+	mmc_rx_csum_offload_irq = 0x0080,
+	mmc_tx_irq = 0x0040,
+	mmc_rx_irq = 0x0020,
+	mmc_irq = 0x0010,
+	pmt_irq = 0x0008,
+	pcs_ane_irq = 0x0004,
+	pcs_link_irq = 0x0002,
+	rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK		0x0000003c	/* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT		0x0000002c
+enum power_event {
+	pointer_reset = 0x80000000,
+	global_unicast = 0x00000200,
+	wake_up_rx_frame = 0x00000040,
+	magic_frame = 0x00000020,
+	wake_up_frame_en = 0x00000004,
+	magic_pkt_en = 0x00000002,
+	power_down = 0x00000001,
+};
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg)	(0x00000040+(reg * 8))
+#define GMAC_ADDR_LOW(reg)	(0x00000044+(reg * 8))
+#define GMAC_MAX_UNICAST_ADDRESSES	16
+
+#define GMAC_AN_CTRL	0x000000c0	/* AN control */
+#define GMAC_AN_STATUS	0x000000c4	/* AN status */
+#define GMAC_ANE_ADV	0x000000c8	/* Auto-Neg. Advertisement */
+#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
+#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
+#define GMAC_TBI	0x000000d4	/* TBI extend status */
+#define GMAC_GMII_STATUS	0x000000d8	/* S/R-GMII status */
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC	0x01000000	/* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD	0x00400000	/* Jabber disable */
+#define GMAC_CONTROL_BE	0x00200000	/* Frame Burst Enable */
+#define GMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
+enum inter_frame_gap {
+	GMAC_CONTROL_IFG_88 = 0x00040000,
+	GMAC_CONTROL_IFG_80 = 0x00020000,
+	GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS	0x00010000 /* Disable carrier sense during tx */
+#define GMAC_CONTROL_PS		0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES	0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO		0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM		0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM		0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC	0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR		0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD	0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS	0x00000080 /* Automatic Pad/FCS Stripping */
+#define GMAC_CONTROL_DC		0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE		0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE		0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+			GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE	0x00000002	/* MII Write */
+#define GMAC_MII_ADDR_BUSY	0x00000001	/* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT	16
+#define GMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
+#define DMA_BUS_MODE_DA		0x00000002	/* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT	2	/* (in DWORDS) */
+/* Programmable burst length (passed through platform)*/
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT	8
+
+enum rx_tx_priority_ratio {
+	double_ratio = 0x00004000,	/*2:1 */
+	triple_ratio = 0x00008000,	/*3:1 */
+	quadruple_ratio = 0x0000c000,	/*4:1 */
+};
+
+#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT	17
+#define DMA_BUS_MODE_USP	0x00800000
+#define DMA_BUS_MODE_4PBL	0x01000000
+#define DMA_BUS_MODE_AAL	0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC	0x00001048	/* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC	0x0000104c	/* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT	14
+#define DMA_BUS_FB		0x00010000	/* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+#define DMA_CONTROL_DT		0x04000000 /* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_RSF		0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF		0x01000000 /* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+	act_full_minus_1 = 0x00800000,
+	act_full_minus_2 = 0x00800200,
+	act_full_minus_3 = 0x00800400,
+	act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+	deac_full_minus_1 = 0x00400000,
+	deac_full_minus_2 = 0x00400800,
+	deac_full_minus_3 = 0x00401000,
+	deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF		0x00200000 /* Transmit Store and Forward */
+
+enum ttc_control {
+	DMA_CONTROL_TTC_64 = 0x00000000,
+	DMA_CONTROL_TTC_128 = 0x00004000,
+	DMA_CONTROL_TTC_192 = 0x00008000,
+	DMA_CONTROL_TTC_256 = 0x0000c000,
+	DMA_CONTROL_TTC_40 = 0x00010000,
+	DMA_CONTROL_TTC_32 = 0x00014000,
+	DMA_CONTROL_TTC_24 = 0x00018000,
+	DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK	0xfffe3fff
+
+#define DMA_CONTROL_EFC		0x00000100
+#define DMA_CONTROL_FEF		0x00000080
+#define DMA_CONTROL_FUF		0x00000040
+
+enum rtc_control {
+	DMA_CONTROL_RTC_64 = 0x00000000,
+	DMA_CONTROL_RTC_32 = 0x00000008,
+	DMA_CONTROL_RTC_96 = 0x00000010,
+	DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK	0xffffffe7
+
+#define DMA_CONTROL_OSF	0x00000004	/* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL		0x100
+#define GMAC_MMC_RX_INTR	0x104
+#define GMAC_MMC_TX_INTR	0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD	0x208
+
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac100_core.c linux-3.0.101.xm510/drivers/net/xmmac/dwmac100_core.c
--- linux-3.0.101/drivers/net/xmmac/dwmac100_core.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac100_core.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,306 @@
+/*******************************************************************************
+  This is the driver for the MAC 10/100 on-chip Ethernet controller
+  currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+  DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+  this code.
+
+  This only implements the mac core functions for this chip.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CONTROL);
+
+	writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+	/**********************************************
+	 * Added By ZengChuanJie
+	 * *******************************************/
+
+	/* Flow Control */
+	/* 1.Pause Time 0x200 */
+	//value = MAC_FLOW_CTRL_PAUSETIME;
+	/* 2.Enable Zero-Quanta Pause */
+	/* 3. Pause Low Threshold */
+//	value |= MAC_FLOW_CTRL_PLT28;
+	/* 4. Unicast Pause Frame Detect */
+// value |= MAC_FLOW_CTRL_UP;
+	/* 5. Enable Receive and Transmit Flow Control */
+//	value |= (MAC_FLOW_CTRL_RFE|MAC_FLOW_CTRL_TFE);
+//	writel(value, ioaddr+MAC_FLOW_CTRL);
+
+	/* end */
+}
+
+static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
+{
+	return 0;
+}
+
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
+{
+	pr_info("\t----------------------------------------------\n"
+		"\t  DWMAC 100 CSR (base addr = 0x%p)\n"
+		"\t----------------------------------------------\n",
+		ioaddr);
+	/* ********************************************************
+	 * Edited By ZengChuanJie
+	 * *******************************************************/
+	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+		readl(ioaddr + MAC_CONTROL));
+	pr_info("\tframe filter (offset 0x%x): 0x%08x\n ", MAC_FRAME_FLT,
+		readl(ioaddr + MAC_FRAME_FLT));
+	pr_info("\thash high (offset 0x%x): 0x%08x\n", MAC_HASH_HIGH,
+		readl(ioaddr + MAC_HASH_HIGH));
+	pr_info("\thash low (offset 0x%x): 0x%08x\n",
+		MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+	pr_info("\tmii addr (offset 0x%x): 0x%08x\n",
+		MAC_MII_ADDR, readl(ioaddr + MAC_MII_ADDR));
+	pr_info("\tmii data (offset 0x%x): 0x%08x\n",
+		MAC_MII_DATA, readl(ioaddr + MAC_MII_DATA));
+	pr_info("\tflow control (offset 0x%x): 0x%08x\n", MAC_FLOW_CTRL,
+		readl(ioaddr + MAC_FLOW_CTRL));
+	pr_info("\tint status (offset 0x%x): 0x%08x\n", MAC_INT_STATUS,
+		readl(ioaddr + MAC_INT_STATUS));
+
+	/* end */
+
+}
+
+static void dwmac100_irq_status(void __iomem *ioaddr)
+{
+	return;
+}
+
+#if 0
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+			 unsigned int high, unsigned int low)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	writel(data, ioaddr + high);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + low);
+}
+#endif
+
+#if 0
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+			 unsigned int high, unsigned int low)
+{
+	unsigned int hi_addr, lo_addr;
+
+	/* Read the MAC address from the hardware */
+	hi_addr = readl(ioaddr + high);
+	lo_addr = readl(ioaddr + low);
+
+	/* Extract the MAC address from the high and low words */
+	addr[0] = lo_addr & 0xff;
+	addr[1] = (lo_addr >> 8) & 0xff;
+	addr[2] = (lo_addr >> 16) & 0xff;
+	addr[3] = (lo_addr >> 24) & 0xff;
+	addr[4] = hi_addr & 0xff;
+	addr[5] = (hi_addr >> 8) & 0xff;
+}
+#endif
+
+
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				   unsigned int reg_n)
+{
+	/* **********************************************************
+	 * Edited by ZengChuanJie
+	 * **********************************************************/
+	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR0_HIGH, MAC_ADDR0_LOW);
+	/* end */
+}
+
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				   unsigned int reg_n)
+{
+	/* **********************************************************
+	 * Edited by ZengChuanJie
+	 * **********************************************************/
+	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR0_HIGH, MAC_ADDR0_LOW);
+	/* end */
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+	u32 value = readl(ioaddr + MAC_FRAME_FLT);
+
+	if (dev->flags & IFF_PROMISC) {
+		/* pass all frames, no filtering needed */
+ value |= MAC_CONTROL_PR;
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF |
+			   MAC_CONTROL_HP);
+	} else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+		   || (dev->flags & IFF_ALLMULTI)) {
+ /* 超过可用容纳的多播帧 */
|
||
+ value |= MAC_CONTROL_PM;
|
||
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF);
|
||
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
|
||
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
|
||
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
|
||
+ /* 没有多播帧,这个函数只会通过set_multilist_addr提供接口,所以不会进入这个分支 */
|
||
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR
|
||
+ | MAC_CONTROL_HP);
|
||
+ } else {
|
||
+ u32 mc_filter[2];
|
||
+ struct netdev_hw_addr *ha;
|
||
+
|
||
+ /* Perfect filter mode for physical address and Hash
|
||
+ filter for multicast */
|
||
+ value |= MAC_CONTROL_HP ;
|
||
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
|
||
+ MAC_CONTROL_IF);
|
||
+ //value |= 0x10;
|
||
+ value |= 0x4;
+
+ //int i;
+ //for (i=0; i<6; i++)
+ // printk(KERN_EMERG"%02x ", ha->addr[i]);
+ //printk(KERN_EMERG"set_multilist_addr\n");
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ // for (i=0; i<6; i++)
+ // printk(KERN_EMERG"%02x ", ha->addr[i]);
+ // printk("\n");
+#if 1
+
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ //printk("bit_nr:%x\n", bit_nr);
+ bit_nr = ~bit_nr;
+ //printk("bit_nr:%x\n", bit_nr);
+ bit_nr &= 0x3f;
+#endif
+#if 0
+
+ /* crc32 */
+ unsigned int bit_nr = crc32_le(~0, ha->addr, ETH_ALEN);
+
+ printk("bit_nr:%x\n", bit_nr);
+ bit_nr = bitrev32(bit_nr);
+ printk("bit_nr:%x\n", bit_nr);
+ /* get 31bit-26bit */
+ bit_nr = bit_nr >> 26;
+ printk("bit_nr:%x\n", bit_nr);
+
+ /* ~ */
+ bit_nr = ~bit_nr;
+ printk("bit_nr:%x\n", bit_nr);
+
+ /* lowest 6 bits */
+ bit_nr = bit_nr & 0x3f;
+ printk("bit_nr:%x\n", bit_nr);
+#endif
+
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ /* This yields the values for the high and low hash
+ filter registers */
+ }
+ //mc_filter[1] = 0xffffffff; /* OK */
+ //mc_filter[1] = 0xffff;
+ //mc_filter[1] = 0xff000000;
+ //mc_filter[1] = 0x00f00000; /* OK */
+ //mc_filter[1] = 0x00c00000; /* OK */
+ //mc_filter[1] = 0x00800000; /* OK */
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ //printk(KERN_EMERG"low:%x\n", mc_filter[0]);
+ //printk(KERN_EMERG"high:%x\n", mc_filter[1]);
+ }
+
+ writel(value, ioaddr + MAC_FRAME_FLT);
+
+ CHIP_DBG(KERN_INFO "%s: frame filter reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_FRAME_FLT),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ //printk("flow:%x\n", flow);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ return;
+}
+
+static const struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .rx_coe = dwmac100_rx_coe_supported,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ //pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+
+ return mac;
+}
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac100_dma.c linux-3.0.101.xm510/drivers/net/xmmac/dwmac100_dma.c
--- linux-3.0.101/drivers/net/xmmac/dwmac100_dma.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac100_dma.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,142 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the DMA.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register. */
+static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+
+ if (txmode <= 64)
+ csr6 |= 0;
+ else
+ /* > 64 */
+ csr6 |= 1<<14;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* DMA controller has two counters to track the number of
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
+
+const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac100.h linux-3.0.101.xm510/drivers/net/xmmac/dwmac100.h
--- linux-3.0.101/drivers/net/xmmac/dwmac100.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac100.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,172 @@
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/phy.h>
+#include "common.h"
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+
+/*********************************************************************
+ * 1.Edited by ZengChuanJie
+ ********************************************************************/
+#define MAC_FRAME_FLT 0x00000004 /* MAC Frame Filter */
+#define MAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000010 /* MII Address */
+#define MAC_MII_DATA 0x00000014 /* MII Data */
+#define MAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define MAC_INT_STATUS 0x00000038 /* interrupt status */
+#define MAC_INT_MASK 0x0000003c /* interrupt mask */
+#define MAC_ADDR0_HIGH 0x00000040 /* MAC Address High */
+#define MAC_ADDR0_LOW 0x00000044 /* MAC Address Low */
+#define MAC_ADDR1_HIGH 0x00000048 /* MAC Address High */
+#define MAC_ADDR1_LOW 0x0000004c /* MAC Address Low */
+#define MAC_WATCHDOG_TO 0x000000dc /* Watchdog Timeout */
+/*********************************************************************
+ * end
+ * ******************************************************************/
+
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+
+
+/* ******************************************************************
+ * 2.Edited By ZengChuanJie
+ * ******************************************************************/
+// #define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+/********************************************************************
+ * end
+ * *******************************************************************/
+
+
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+
+/* *************************************************************
+ * 5.Edited By ZengChuanJie
+ * ************************************************************/
+#define MAC_CONTROL_PM 0x00000010 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00000001 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00000100 /* Inverse Filtering */
+#define MAC_CONTROL_HP 0x00000400 /* Hash/Perfect Filtering Mode */
+/* ************************************************************
+ * end
+ * ************************************************************/
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+
+
+
+/* *******************************************************************
+ * 3.Edited by ZengChuanJie
+ * ******************************************************************/
+#define MAC_CONTROL_ASTP 0x00000080 /* Automatic Pad Stripping */
+/************************
+ * end
+ ***********************/
+
+
+
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+
+/*************************************
+ * 4.Edited by ZengChuanJie
+ * **********************************/
+#define MAC_CORE_INIT MAC_CONTROL_ASTP
+/* end */
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+
+/* **********************************
+ * 6.Edited by ZengChuanJie
+ * *********************************/
+//#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000006 /* Flow Control Enable */
+/* end */
+
+
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Counter Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac_dma.h linux-3.0.101.xm510/drivers/net/xmmac/dwmac_dma.h
--- linux-3.0.101/drivers/net/xmmac/dwmac_dma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac_dma.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,108 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
diff -urN linux-3.0.101/drivers/net/xmmac/dwmac_lib.c linux-3.0.101.xm510/drivers/net/xmmac/dwmac_lib.c
--- linux-3.0.101/drivers/net/xmmac/dwmac_lib.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/dwmac_lib.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,258 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running): Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running): Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX (Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to CSR5[16:0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
+ return ret;
+}
+
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
diff -urN linux-3.0.101/drivers/net/xmmac/enh_desc.c linux-3.0.101.xm510/drivers/net/xmmac/enh_desc.c
--- linux-3.0.101/drivers/net/xmmac/enh_desc.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/enh_desc.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = llc_snap;
+ } else if (status == 0x4) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ //x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+ * It doesn't match with the information reported into the databook.
+ * At any rate, we need to understand if the CSUM hw computation is ok
+ * and report this info to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ p->des01.etx.end_ring = ter;
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+const struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
diff -urN linux-3.0.101/drivers/net/xmmac/Kconfig linux-3.0.101.xm510/drivers/net/xmmac/Kconfig
--- linux-3.0.101/drivers/net/xmmac/Kconfig 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/Kconfig 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,57 @@
+config XMMAC_ETH
+ tristate "xm510 10/100/1000 Ethernet driver"
+ select MII
+ select PHYLIB
+ select CRC32
+ depends on NETDEVICES && HAS_IOMEM
+ help
+ This is the driver for Ethernet IPs built around a Synopsys
+ IP core; it has only been tested on STMicroelectronics
+ platforms.
+
+if STMMAC_ETH
+
+config STMMAC_DA
+ bool "STMMAC DMA arbitration scheme"
+ default n
+ help
+ When this option is selected, Rx has priority over Tx (only
+ for Gigabit Ethernet devices).
+ By default, the DMA arbitration scheme is based on Round-robin
+ (rx:tx priority is 1:1).
+
+config STMMAC_DUAL_MAC
+ bool "STMMAC: dual mac support (EXPERIMENTAL)"
+ default n
+ depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+ help
+ Some ST SoCs (for example the stx7141 and stx7200c2) have two
+ Ethernet Controllers. This option turns on the second Ethernet
+ device on these platforms.
+
+config STMMAC_TIMER
+ bool "STMMAC Timer optimisation"
+ default n
+ depends on RTC_HCTOSYS_DEVICE
+ help
+ Use an external timer for mitigating the number of network
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
+
+choice
+ prompt "Select Timer device"
+ depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+ bool "TMU channel 2"
+ depends on CPU_SH4
+ help
+
+config STMMAC_RTC_TIMER
+ bool "Real time clock"
+ depends on RTC_CLASS
+ help
+
+endchoice
+
+endif
diff -urN linux-3.0.101/drivers/net/xmmac/Makefile linux-3.0.101.xm510/drivers/net/xmmac/Makefile
--- linux-3.0.101/drivers/net/xmmac/Makefile 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/Makefile 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XMMAC_ETH) += xmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+xmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff -urN linux-3.0.101/drivers/net/xmmac/norm_desc.c linux-3.0.101.xm510/drivers/net/xmmac/norm_desc.c
--- linux-3.0.101/drivers/net/xmmac/norm_desc.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/norm_desc.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ /**************************************************
+ * Edited By ZengChuanJie
+ * ***********************************************/
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+ /******** end ****************/
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ /************************************************
+ * Edited By ZengChuanJie
+ * *********************************************/
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.SAF))
+ x->sa_rx_filter_fail++;
+ if (unlikely(p->des01.rx.overflow_error))
+ x->rx_gmac_overflow++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.filtering_fail)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ /***********end*********/
+
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+const struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
diff -urN linux-3.0.101/drivers/net/xmmac/source/common.h linux-3.0.101.xm510/drivers/net/xmmac/source/common.h
--- linux-3.0.101/drivers/net/xmmac/source/common.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/source/common.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,252 @@
+/*******************************************************************************
+  STMMAC Common Header File
+
+  Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...)  do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_extra_stats {
+	/* Transmit errors */
+	unsigned long tx_underflow ____cacheline_aligned;
+	unsigned long tx_carrier;
+	unsigned long tx_losscarrier;
+	unsigned long tx_heartbeat;
+	unsigned long tx_deferred;
+	unsigned long tx_vlan;
+	unsigned long tx_jabber;
+	unsigned long tx_frame_flushed;
+	unsigned long tx_payload_error;
+	unsigned long tx_ip_header_error;
+	/* Receive errors */
+	unsigned long rx_desc;
+	unsigned long rx_partial;
+	unsigned long rx_runt;
+	unsigned long rx_toolong;
+	unsigned long rx_collision;
+	unsigned long rx_crc;
+	unsigned long rx_length;
+	unsigned long rx_mii;
+	unsigned long rx_multicast;
+	unsigned long rx_gmac_overflow;
+	unsigned long rx_watchdog;
+	unsigned long da_rx_filter_fail;
+	unsigned long sa_rx_filter_fail;
+	unsigned long rx_missed_cntr;
+	unsigned long rx_overflow_cntr;
+	unsigned long rx_vlan;
+	/* Tx/Rx IRQ errors */
+	unsigned long tx_undeflow_irq;
+	unsigned long tx_process_stopped_irq;
+	unsigned long tx_jabber_irq;
+	unsigned long rx_overflow_irq;
+	unsigned long rx_buf_unav_irq;
+	unsigned long rx_process_stopped_irq;
+	unsigned long rx_watchdog_irq;
+	unsigned long tx_early_irq;
+	unsigned long fatal_bus_error_irq;
+	/* Extra info */
+	unsigned long threshold;
+	unsigned long tx_pkt_n;
+	unsigned long rx_pkt_n;
+	unsigned long poll_n;
+	unsigned long sched_timer_n;
+	unsigned long normal_irq_n;
+};
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF	0
+#define FLOW_RX		1
+#define FLOW_TX		2
+#define FLOW_AUTO	(FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+enum rx_frame_status { /* IPC status */
+	good_frame = 0,
+	discard_frame = 1,
+	csum_none = 2,
+	llc_snap = 4,
+};
+
+enum tx_dma_irq_status {
+	tx_hard_error = 1,
+	tx_hard_error_bump_tc = 2,
+	handle_tx_rx = 3,
+};
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG		0x00000000	/* MAC Control */
+#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
+#define MAC_RNABLE_RX		0x00000004	/* Receiver Enable */
+
+/* MAC Management Counters register */
+#define MMC_CONTROL		0x00000100	/* MMC Control */
+#define MMC_HIGH_INTR		0x00000104	/* MMC High Interrupt */
+#define MMC_LOW_INTR		0x00000108	/* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK	0x0000010c	/* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK	0x00000110	/* MMC Low Interrupt Mask */
+
+#define MMC_CONTROL_MAX_FRM_MASK	0x0003ff8	/* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT	3
+#define MMC_CONTROL_MAX_FRAME		0x7FF
+
+struct stmmac_desc_ops {
+	/* DMA RX descriptor ring initialization */
+	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+			      int disable_rx_ic);
+	/* DMA TX descriptor ring initialization */
+	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+	/* Invoked by the xmit function to prepare the tx descriptor */
+	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+				 int csum_flag);
+	/* Set/get the owner of the descriptor */
+	void (*set_tx_owner) (struct dma_desc *p);
+	int (*get_tx_owner) (struct dma_desc *p);
+	/* Invoked by the xmit function to close the tx descriptor */
+	void (*close_tx_desc) (struct dma_desc *p);
+	/* Clean the tx descriptor as soon as the tx irq is received */
+	void (*release_tx_desc) (struct dma_desc *p);
+	/* Clear interrupt on tx frame completion. When this bit is
+	 * set an interrupt happens as soon as the frame is transmitted */
+	void (*clear_tx_ic) (struct dma_desc *p);
+	/* Last tx segment reports the transmit status */
+	int (*get_tx_ls) (struct dma_desc *p);
+	/* Return the transmit status looking at the TDES1 */
+	int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p, void __iomem *ioaddr);
+	/* Get the buffer size from the descriptor */
+	int (*get_tx_len) (struct dma_desc *p);
+	/* Handle extra events on specific interrupts hw dependent */
+	int (*get_rx_owner) (struct dma_desc *p);
+	void (*set_rx_owner) (struct dma_desc *p);
+	/* Get the receive frame size */
+	int (*get_rx_frame_len) (struct dma_desc *p);
+	/* Return the reception status looking at the RDES1 */
+	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+	/* DMA core initialization */
+	int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+	/* Dump DMA registers */
+	void (*dump_regs) (void __iomem *ioaddr);
+	/* Set tx/rx threshold in the csr6 register
+	 * An invalid value enables the store-and-forward mode */
+	void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+	/* To track extra statistic (if supported) */
+	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+				   void __iomem *ioaddr);
+	void (*enable_dma_transmission) (void __iomem *ioaddr);
+	void (*enable_dma_irq) (void __iomem *ioaddr);
+	void (*disable_dma_irq) (void __iomem *ioaddr);
+	void (*start_tx) (void __iomem *ioaddr);
+	void (*stop_tx) (void __iomem *ioaddr);
+	void (*start_rx) (void __iomem *ioaddr);
+	void (*stop_rx) (void __iomem *ioaddr);
+	int (*dma_interrupt) (void __iomem *ioaddr,
+			      struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+	/* MAC core initialization */
+	void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+	/* Support checksum offload engine */
+	int (*rx_coe) (void __iomem *ioaddr);
+	/* Dump MAC registers */
+	void (*dump_regs) (void __iomem *ioaddr);
+	/* Handle extra events on specific interrupts hw dependent */
+	void (*host_irq_status) (void __iomem *ioaddr);
+	/* Multicast filter setting */
+	void (*set_filter) (struct net_device *dev);
+	/* Flow control setting */
+	void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
+			   unsigned int fc, unsigned int pause_time);
+	/* Set power management mode (e.g. magic frame) */
+	void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+	/* Set/Get Unicast MAC addresses */
+	void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+			       unsigned int reg_n);
+	void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+			       unsigned int reg_n);
+};
+
+struct mac_link {
+	int port;
+	int duplex;
+	int speed;
+};
+
+struct mii_regs {
+	unsigned int addr;	/* MII Address */
+	unsigned int data;	/* MII Data */
+};
+
+struct mac_device_info {
+	const struct stmmac_ops *mac;
+	const struct stmmac_desc_ops *desc;
+	const struct stmmac_dma_ops *dma;
+	struct mii_regs mii;	/* MII register Addresses */
+	struct mac_link link;
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+				unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
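The three method tables declared above (stmmac_ops, stmmac_dma_ops, stmmac_desc_ops) are bundled into mac_device_info so the DWMAC100 and DWMAC1000 cores can share one driver core. A minimal sketch of that dispatch, assuming only the declarations in this header; the helper name and its call site are hypothetical and not part of the patch:

	#include "common.h"

	/* Sketch: start a MAC through the ops tables; "hw" would come from
	 * dwmac100_setup() or dwmac1000_setup(). */
	static void bring_up_mac_sketch(struct mac_device_info *hw,
					void __iomem *ioaddr)
	{
		hw->mac->core_init(ioaddr);	/* core-specific MAC init */
		if (hw->mac->rx_coe(ioaddr))	/* RX checksum offload? */
			pr_info("stmmac: RX IPC checksum offload supported\n");
		hw->dma->start_tx(ioaddr);	/* kick both DMA engines */
		hw->dma->start_rx(ioaddr);
	}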
diff -urN linux-3.0.101/drivers/net/xmmac/source/descs.h linux-3.0.101.xm510/drivers/net/xmmac/source/descs.h
--- linux-3.0.101/drivers/net/xmmac/source/descs.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/source/descs.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,163 @@
+/*******************************************************************************
+  Header File to describe the DMA descriptors.
+  Enhanced descriptors are used in case of the DWMAC1000 cores.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+	/* Receive descriptor */
+	union {
+		struct {
+			/* RDES0 */
+			u32 reserved1:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 mii_error:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 collision:1;
+			u32 frame_too_long:1;
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 multicast_frame:1;
+			u32 run_frame:1;
+			u32 length_error:1;
+			u32 partial_frame_error:1;
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 filtering_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved2:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 reserved3:5;
+			u32 disable_ic:1;
+		} rx;
+		struct {
+			/* RDES0 */
+			u32 payload_csum_error:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 error_gmii:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 late_collision:1;
+			u32 ipc_csum_error:1;
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 vlan_tag:1;
+			u32 overflow_error:1;
+			u32 length_error:1;
+			u32 sa_filter_fail:1;
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 da_filter_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:13;
+			u32 reserved1:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 buffer2_size:13;
+			u32 reserved2:2;
+			u32 disable_ic:1;
+		} erx;		/* -- enhanced -- */
+
+		/* Transmit descriptor */
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 heartbeat_fail:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 reserved1:3;
+			u32 error_summary:1;
+			u32 reserved2:15;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved3:1;
+			u32 disable_padding:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 crc_disable:1;
+			u32 reserved4:2;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+		} tx;
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 vlan_frame:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 payload_error:1;
+			u32 frame_flushed:1;
+			u32 jabber_timeout:1;
+			u32 error_summary:1;
+			u32 ip_header_error:1;
+			u32 time_stamp_status:1;
+			u32 reserved1:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 checksum_insertion:2;
+			u32 reserved2:1;
+			u32 time_stamp_enable:1;
+			u32 disable_padding:1;
+			u32 crc_disable:1;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:13;
+			u32 reserved3:3;
+			u32 buffer2_size:13;
+			u32 reserved4:3;
+		} etx;		/* -- enhanced -- */
+	} des01;
+	unsigned int des2;
+	unsigned int des3;
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+	cic_disabled = 0,	/* Checksum Insertion Control */
+	cic_only_ip = 1,	/* Only IP header */
+	cic_no_pseudoheader = 2,	/* IP header but pseudoheader
+					 * is not calculated */
+	cic_full = 3,		/* IP header and pseudoheader */
+};
diff -urN linux-3.0.101/drivers/net/xmmac/source/_dwmac100_core.c linux-3.0.101.xm510/drivers/net/xmmac/source/_dwmac100_core.c
--- linux-3.0.101/drivers/net/xmmac/source/_dwmac100_core.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/source/_dwmac100_core.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,207 @@
+/*******************************************************************************
+  This is the driver for the MAC 10/100 on-chip Ethernet controller
+  currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+  DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+  this code.
+
+  This only implements the mac core functions for this chip.
+
+  Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CONTROL);
+
+	writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+}
+
+static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
+{
+	return 0;
+}
+
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
+{
+	pr_info("\t----------------------------------------------\n"
+		"\t  DWMAC 100 CSR (base addr = 0x%p)\n"
+		"\t----------------------------------------------\n",
+		ioaddr);
+	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+		readl(ioaddr + MAC_CONTROL));
+	pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+		readl(ioaddr + MAC_ADDR_HIGH));
+	pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+		readl(ioaddr + MAC_ADDR_LOW));
+	pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+		MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+	pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+		MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+	pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+		MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+	pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+		readl(ioaddr + MAC_VLAN1));
+	pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+		readl(ioaddr + MAC_VLAN2));
+	pr_info("\n\tMAC management counter registers\n");
+	pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+		MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+	pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+		MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+	pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+		MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+	pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+		MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+	pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+		MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+}
+
+static void dwmac100_irq_status(void __iomem *ioaddr)
+{
+	return;
+}
+
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				   unsigned int reg_n)
+{
+	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				   unsigned int reg_n)
+{
+	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+	u32 value = readl(ioaddr + MAC_CONTROL);
+
+	if (dev->flags & IFF_PROMISC) {
+		/* in promiscuous mode no address is filtered at all */
+		value |= MAC_CONTROL_PR;
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+			   MAC_CONTROL_HP);
+	} else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+		   || (dev->flags & IFF_ALLMULTI)) {
+		/* netdev_mc_count returns how many multicast addresses are
+		 * already installed on the device */
+		value |= MAC_CONTROL_PM;
+		value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+		writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+		writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+	} else if (netdev_mc_empty(dev)) {	/* no multicast */
+		/*
+		 * set_filter is not exposed directly; it is only reached via
+		 * set_multilist_addr, so this branch is almost never taken
+		 */
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+			   MAC_CONTROL_HO | MAC_CONTROL_HP);
+	} else {
+		u32 mc_filter[2];
+		struct netdev_hw_addr *ha;
+
+		/* Perfect filter mode for physical address and Hash
+		   filter for multicast */
+		value |= MAC_CONTROL_HP;
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+			   MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			/* the high/low filter registers together hold only 64
+			 * hash bits, so several multicast addresses may map
+			 * to the same bit */
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table */
+			int bit_nr =
+			    ether_crc(ETH_ALEN, ha->addr) >> 26;
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+		writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+	}
+
+	writel(value, ioaddr + MAC_CONTROL);
+
+	CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+		 "HI 0x%08x, LO 0x%08x\n",
+		 __func__, readl(ioaddr + MAC_CONTROL),
+		 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+			       unsigned int fc, unsigned int pause_time)
+{
+	unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+	if (duplex)
+		flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+	writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+	return;
+}
+
+static const struct stmmac_ops dwmac100_ops = {
+	.core_init = dwmac100_core_init,
+	.rx_coe = dwmac100_rx_coe_supported,
+	.dump_regs = dwmac100_dump_mac_regs,
+	.host_irq_status = dwmac100_irq_status,
+	.set_filter = dwmac100_set_filter,
+	.flow_ctrl = dwmac100_flow_ctrl,
+	.pmt = dwmac100_pmt,
+	.set_umac_addr = dwmac100_set_umac_addr,
+	.get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+{
+	struct mac_device_info *mac;
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	pr_info("\tDWMAC100\n");
+
+	mac->mac = &dwmac100_ops;
+	mac->dma = &dwmac100_dma_ops;
+
+	mac->link.port = MAC_CONTROL_PS;
+	mac->link.duplex = MAC_CONTROL_F;
+	mac->link.speed = 0;
+	mac->mii.addr = MAC_MII_ADDR;
+	mac->mii.data = MAC_MII_DATA;
+
+	return mac;
+}
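In dwmac100_set_filter() above, each multicast address is hashed with ether_crc() and only the top six CRC bits survive: the highest of those picks MAC_HASH_HIGH or MAC_HASH_LOW, the other five select a bit inside that register. A stand-alone sketch of the same computation, using the classic Ethernet CRC formulation (MSB-first register, data bits fed LSB-first) that the kernel's ether_crc() implements; the names and the sample address are illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t ether_crc_sketch(int len, const uint8_t *data)
	{
		uint32_t crc = 0xffffffff;
		int bit;

		while (--len >= 0) {
			uint8_t byte = *data++;
			for (bit = 0; bit < 8; bit++, byte >>= 1)
				crc = (crc << 1) ^
				      ((((crc >> 31) ^ byte) & 1) ? 0x04c11db7 : 0);
		}
		return crc;
	}

	int main(void)
	{
		/* hypothetical multicast address */
		const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint32_t filter[2] = { 0, 0 };
		int bit_nr = ether_crc_sketch(6, mac) >> 26; /* top 6 CRC bits */

		filter[bit_nr >> 5] |= 1u << (bit_nr & 31);  /* H/L reg + bit */
		printf("bit %d -> MAC_HASH_%s\n", bit_nr,
		       (bit_nr >> 5) ? "HIGH" : "LOW");
		return 0;
	}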
diff -urN linux-3.0.101/drivers/net/xmmac/source/Makefile linux-3.0.101.xm510/drivers/net/xmmac/source/Makefile
--- linux-3.0.101/drivers/net/xmmac/source/Makefile	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/source/Makefile	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,5 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+	      dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+	      dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff -urN linux-3.0.101/drivers/net/xmmac/source/norm_desc.c linux-3.0.101.xm510/drivers/net/xmmac/source/norm_desc.c
--- linux-3.0.101/drivers/net/xmmac/source/norm_desc.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/source/norm_desc.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,221 @@
+/*******************************************************************************
+  This contains the functions to handle the normal descriptors.
+
+  Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+			       struct dma_desc *p, void __iomem *ioaddr)
+{
+	int ret = 0;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.tx.error_summary)) {
+		if (unlikely(p->des01.tx.underflow_error)) {
+			x->tx_underflow++;
+			stats->tx_fifo_errors++;
+		}
+		if (unlikely(p->des01.tx.no_carrier)) {
+			x->tx_carrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(p->des01.tx.loss_carrier)) {
+			x->tx_losscarrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely((p->des01.tx.excessive_deferral) ||
+			     (p->des01.tx.excessive_collisions) ||
+			     (p->des01.tx.late_collision)))
+			stats->collisions += p->des01.tx.collision_count;
+		ret = -1;
+	}
+	if (unlikely(p->des01.tx.heartbeat_fail)) {
+		x->tx_heartbeat++;
+		stats->tx_heartbeat_errors++;
+		ret = -1;
+	}
+	if (unlikely(p->des01.tx.deferred))
+		x->tx_deferred++;
+
+	return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+	return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+			       struct dma_desc *p)
+{
+	int ret = csum_none;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.rx.last_descriptor == 0)) {
+		pr_warning("ndesc Error: Oversized Ethernet "
+			   "frame spanned multiple buffers\n");
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
+	if (unlikely(p->des01.rx.error_summary)) {
+		if (unlikely(p->des01.rx.descriptor_error))
+			x->rx_desc++;
+		if (unlikely(p->des01.rx.partial_frame_error))
+			x->rx_partial++;
+		if (unlikely(p->des01.rx.run_frame))
+			x->rx_runt++;
+		if (unlikely(p->des01.rx.frame_too_long))
+			x->rx_toolong++;
+		if (unlikely(p->des01.rx.collision)) {
+			x->rx_collision++;
+			stats->collisions++;
+		}
+		if (unlikely(p->des01.rx.crc_error)) {
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.rx.dribbling))
+		ret = discard_frame;
+
+	if (unlikely(p->des01.rx.length_error)) {
+		x->rx_length++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.rx.mii_error)) {
+		x->rx_mii++;
+		ret = discard_frame;
+	}
+	if (p->des01.rx.multicast_frame) {
+		x->rx_multicast++;
+		stats->multicast++;
+	}
+	return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+			       int disable_rx_ic)
+{
+	int i;
+	for (i = 0; i < ring_size; i++) {
+		p->des01.rx.own = 1;
+		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+		if (i == ring_size - 1)
+			p->des01.rx.end_ring = 1;
+		if (disable_rx_ic)
+			p->des01.rx.disable_ic = 1;
+		p++;
+	}
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+	int i;
+	for (i = 0; i < ring_size; i++) {
+		p->des01.tx.own = 0;
+		if (i == ring_size - 1)
+			p->des01.tx.end_ring = 1;
+		p++;
+	}
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+	return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+	return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+	p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+	p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+	return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+	int ter = p->des01.tx.end_ring;
+
+	memset(p, 0, offsetof(struct dma_desc, des2));
+	/* set termination field */
+	p->des01.tx.end_ring = ter;
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				  int csum_flag)
+{
+	p->des01.tx.first_segment = is_fs;
+	p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+	p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+	p->des01.tx.last_segment = 1;
+	p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+	return p->des01.rx.frame_length;
+}
+
+const struct stmmac_desc_ops ndesc_ops = {
+	.tx_status = ndesc_get_tx_status,
+	.rx_status = ndesc_get_rx_status,
+	.get_tx_len = ndesc_get_tx_len,
+	.init_rx_desc = ndesc_init_rx_desc,
+	.init_tx_desc = ndesc_init_tx_desc,
+	.get_tx_owner = ndesc_get_tx_owner,
+	.get_rx_owner = ndesc_get_rx_owner,
+	.release_tx_desc = ndesc_release_tx_desc,
+	.prepare_tx_desc = ndesc_prepare_tx_desc,
+	.clear_tx_ic = ndesc_clear_tx_ic,
+	.close_tx_desc = ndesc_close_tx_desc,
+	.get_tx_ls = ndesc_get_tx_ls,
+	.set_tx_owner = ndesc_set_tx_owner,
+	.set_rx_owner = ndesc_set_rx_owner,
+	.get_rx_frame_len = ndesc_get_rx_frame_len,
+};
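The ndesc_ops table above is what the core driver plugs into mac_device_info->desc for the 10/100 descriptors; its TX-reclaim half is driven through get_tx_owner() and release_tx_desc(). A minimal sketch of that pattern, mirroring the dirty/cur index walk the main driver uses (the function and parameter names here are hypothetical):

	#include "common.h"

	static unsigned int reap_tx_sketch(const struct stmmac_desc_ops *ops,
					   struct dma_desc *ring,
					   unsigned int size,
					   unsigned int dirty, unsigned int cur)
	{
		while (dirty != cur) {
			struct dma_desc *p = ring + (dirty % size);

			/* stop at the first descriptor the DMA still owns */
			if (ops->get_tx_owner(p))
				break;
			/* ...unmap the buffer and free the skb here... */
			ops->release_tx_desc(p); /* clears all but end_ring */
			dirty++;
		}
		return dirty;	/* new dirty_tx index */
	}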
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac_ethtool.c linux-3.0.101.xm510/drivers/net/xmmac/stmmac_ethtool.c
--- linux-3.0.101/drivers/net/xmmac/stmmac_ethtool.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac_ethtool.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,357 @@
+/*******************************************************************************
+  STMMAC Ethtool support
+
+  Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+#include "dwmac_dma.h"
+
+#define REG_SPACE_SIZE	0x1054
+#define MAC100_ETHTOOL_NAME	"st_mac100"
+#define GMAC_ETHTOOL_NAME	"st_gmac"
+
+struct stmmac_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define STMMAC_STAT(m)	\
+	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
+	offsetof(struct stmmac_priv, xstats.m)}
+
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+	STMMAC_STAT(tx_underflow),
+	STMMAC_STAT(tx_carrier),
+	STMMAC_STAT(tx_losscarrier),
+	//STMMAC_STAT(tx_heartbeat),
+	STMMAC_STAT(tx_deferred),
+	STMMAC_STAT(tx_vlan),
+	STMMAC_STAT(rx_vlan),
+	STMMAC_STAT(tx_jabber),
+	STMMAC_STAT(tx_frame_flushed),
+	STMMAC_STAT(tx_payload_error),
+	STMMAC_STAT(tx_ip_header_error),
+	STMMAC_STAT(rx_desc),
+	//STMMAC_STAT(rx_partial),
+	//STMMAC_STAT(rx_runt),
+	//STMMAC_STAT(rx_toolong),
+	STMMAC_STAT(rx_collision),
+	STMMAC_STAT(rx_crc),
+	STMMAC_STAT(rx_length),
+	//STMMAC_STAT(rx_mii),
+	//STMMAC_STAT(rx_multicast),
+	STMMAC_STAT(rx_gmac_overflow),
+	STMMAC_STAT(rx_watchdog),
+	STMMAC_STAT(da_rx_filter_fail),
+	STMMAC_STAT(sa_rx_filter_fail),
+	STMMAC_STAT(rx_missed_cntr),
+	STMMAC_STAT(rx_overflow_cntr),
+	STMMAC_STAT(tx_undeflow_irq),
+	STMMAC_STAT(tx_process_stopped_irq),
+	STMMAC_STAT(tx_jabber_irq),
+	STMMAC_STAT(rx_overflow_irq),
+	STMMAC_STAT(rx_buf_unav_irq),
+	STMMAC_STAT(rx_process_stopped_irq),
+	STMMAC_STAT(rx_watchdog_irq),
+	STMMAC_STAT(tx_early_irq),
+	STMMAC_STAT(fatal_bus_error_irq),
+	STMMAC_STAT(threshold),
+	STMMAC_STAT(tx_pkt_n),
+	STMMAC_STAT(rx_pkt_n),
+	STMMAC_STAT(poll_n),
+	STMMAC_STAT(sched_timer_n),
+	STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *info)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (!priv->plat->has_gmac)
+		strcpy(info->driver, MAC100_ETHTOOL_NAME);
+	else
+		strcpy(info->driver, GMAC_ETHTOOL_NAME);
+
+	strcpy(info->version, DRV_MODULE_VERSION);
+	info->fw_version[0] = '\0';
+	info->n_stats = STMMAC_STATS_LEN;
+}
+
+static int stmmac_ethtool_getsettings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phy = priv->phydev;
+	int rc;
+	if (phy == NULL) {
+		pr_err("%s: %s: PHY is not registered\n",
+		       __func__, dev->name);
+		return -ENODEV;
+	}
+	if (!netif_running(dev)) {
+		pr_err("%s: interface is disabled: we cannot track "
+		       "link speed / duplex setting\n", dev->name);
+		return -EBUSY;
+	}
+	cmd->transceiver = XCVR_INTERNAL;
+	spin_lock_irq(&priv->lock);
+	rc = phy_ethtool_gset(phy, cmd);
+	spin_unlock_irq(&priv->lock);
+	return rc;
+}
+
+static int stmmac_ethtool_setsettings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phy = priv->phydev;
+	int rc;
+
+	spin_lock(&priv->lock);
+	rc = phy_ethtool_sset(phy, cmd);
+	spin_unlock(&priv->lock);
+
+	return rc;
+}
+
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	return priv->msg_enable;
+}
+
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	priv->msg_enable = level;
+
+}
+
+static int stmmac_check_if_running(struct net_device *dev)
+{
+	if (!netif_running(dev))
+		return -EBUSY;
+	return 0;
+}
+
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+	return REG_SPACE_SIZE;
+}
+
+static void stmmac_ethtool_gregs(struct net_device *dev,
+				 struct ethtool_regs *regs, void *space)
+{
+	int i;
+	u32 *reg_space = (u32 *) space;
+
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+	if (!priv->plat->has_gmac) {
+		/* MAC registers */
+		for (i = 0; i < 12; i++)
+			reg_space[i] = readl(priv->ioaddr + (i * 4));
+		/* DMA registers */
+		for (i = 0; i < 9; i++)
+			reg_space[i + 12] =
+			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+		reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+		reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
+	} else {
+		/* MAC registers */
+		for (i = 0; i < 55; i++)
+			reg_space[i] = readl(priv->ioaddr + (i * 4));
+		/* DMA registers */
+		for (i = 0; i < 22; i++)
+			reg_space[i + 55] =
+			    readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+	}
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+		      struct ethtool_pauseparam *pause)
+{
+	struct stmmac_priv *priv = netdev_priv(netdev);
+
+	spin_lock(&priv->lock);
+
+	pause->rx_pause = 0;
+	pause->tx_pause = 0;
+	pause->autoneg = priv->phydev->autoneg;
+
+	if (priv->flow_ctrl & FLOW_RX)
+		pause->rx_pause = 1;
+	if (priv->flow_ctrl & FLOW_TX)
+		pause->tx_pause = 1;
+
+	spin_unlock(&priv->lock);
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+		      struct ethtool_pauseparam *pause)
+{
+	struct stmmac_priv *priv = netdev_priv(netdev);
+	struct phy_device *phy = priv->phydev;
+	int new_pause = FLOW_OFF;
+	int ret = 0;
+
+	spin_lock(&priv->lock);
+
+	if (pause->rx_pause)
+		new_pause |= FLOW_RX;
+	if (pause->tx_pause)
+		new_pause |= FLOW_TX;
+
+	priv->flow_ctrl = new_pause;
+	phy->autoneg = pause->autoneg;
+
+	if (phy->autoneg) {
+		if (netif_running(netdev))
+			ret = phy_start_aneg(phy);
+	} else
+		priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
+					 priv->flow_ctrl, priv->pause);
+	spin_unlock(&priv->lock);
+	return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *dummy, u64 *data)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int i;
+
+	/* Update HW stats if supported */
+	priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+					 priv->ioaddr);
+
+	for (i = 0; i < STMMAC_STATS_LEN; i++) {
+		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+		data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+	}
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return STMMAC_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+	u8 *p = data;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < STMMAC_STATS_LEN; i++) {
+			memcpy(p, stmmac_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/* Currently only support WOL through Magic packet. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	spin_lock_irq(&priv->lock);
+	if (device_can_wakeup(priv->device)) {
+		wol->supported = WAKE_MAGIC | WAKE_UCAST;
+		wol->wolopts = priv->wolopts;
+	}
+	spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+	if (!device_can_wakeup(priv->device))
+		return -EINVAL;
+
+	if (wol->wolopts & ~support)
+		return -EINVAL;
+
+	if (wol->wolopts) {
+		pr_info("stmmac: wakeup enable\n");
+		device_set_wakeup_enable(priv->device, 1);
+		enable_irq_wake(dev->irq);
+	} else {
+		device_set_wakeup_enable(priv->device, 0);
+		disable_irq_wake(dev->irq);
+	}
+
+	spin_lock_irq(&priv->lock);
+	priv->wolopts = wol->wolopts;
+	spin_unlock_irq(&priv->lock);
+
+	return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+	.begin = stmmac_check_if_running,
+	.get_drvinfo = stmmac_ethtool_getdrvinfo,
+	.get_settings = stmmac_ethtool_getsettings,
+	.set_settings = stmmac_ethtool_setsettings,
+	.get_msglevel = stmmac_ethtool_getmsglevel,
+	.set_msglevel = stmmac_ethtool_setmsglevel,
+	.get_regs = stmmac_ethtool_gregs,
+	.get_regs_len = stmmac_ethtool_get_regs_len,
+	.get_link = ethtool_op_get_link,
+	.get_pauseparam = stmmac_get_pauseparam,
+	.set_pauseparam = stmmac_set_pauseparam,
+	.get_ethtool_stats = stmmac_get_ethtool_stats,
+	.get_strings = stmmac_get_strings,
+	.get_wol = stmmac_get_wol,
+	.set_wol = stmmac_set_wol,
+	.get_sset_count = stmmac_get_sset_count,
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac.h linux-3.0.101.xm510/drivers/net/xmmac/stmmac.h
--- linux-3.0.101/drivers/net/xmmac/stmmac.h	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac.h	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,96 @@
+/*******************************************************************************
+  Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define DRV_MODULE_VERSION	"Nov_2010"
+#include <linux/stmmac.h>
+
+#include "common.h"
+#ifdef CONFIG_STMMAC_TIMER
+#include "stmmac_timer.h"
+#endif
+
+struct stmmac_priv {
+	/* Frequently used values are kept adjacent for cache effect */
+	struct dma_desc *dma_tx ____cacheline_aligned;
+	dma_addr_t dma_tx_phy;
+	struct sk_buff **tx_skbuff;
+	unsigned int cur_tx;
+	unsigned int dirty_tx;
+	unsigned int dma_tx_size;
+	int tx_coalesce;
+
+	struct dma_desc *dma_rx;
+	/*
+	 * When reception starts, cur_rx == dirty_rx; the host advances
+	 * cur_rx for every frame it processes, so the distance between
+	 * cur_rx and dirty_rx is the number of frames received so far.
+	 */
+	unsigned int cur_rx;
+	unsigned int dirty_rx;
+	struct sk_buff **rx_skbuff;
+	dma_addr_t *rx_skbuff_dma;
+	struct sk_buff_head rx_recycle;
+
+	struct net_device *dev;
+	dma_addr_t dma_rx_phy;
+	unsigned int dma_rx_size;
+	unsigned int dma_buf_sz;
+	struct device *device;
+	struct mac_device_info *hw;
+	void __iomem *ioaddr;
+
+	struct stmmac_extra_stats xstats;
+	struct napi_struct napi;
+
+	phy_interface_t phy_interface;
+	int phy_addr;
+	int phy_mask;
+	int (*phy_reset) (void *priv);
+	int rx_coe;
+	int no_csum_insertion;
+
+	int phy_irq;
+	struct phy_device *phydev;
+	int oldlink;
+	int speed;
+	int oldduplex;
+	unsigned int flow_ctrl;
+	unsigned int pause;
+	struct mii_bus *mii;
+
+	u32 msg_enable;
+	spinlock_t lock;
+	int wolopts;
+	int wolenabled;
+#ifdef CONFIG_STMMAC_TIMER
+	struct stmmac_timer *tm;
+#endif
+#ifdef STMMAC_VLAN_TAG_USED
+	struct vlan_group *vlgrp;
+#endif
+	struct plat_stmmacenet_data *plat;
+};
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
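The cur_rx/dirty_rx comment above describes free-running indices: both counters only ever increase, the ring slot is the counter modulo dma_rx_size, and their difference is the outstanding work. A one-line sketch of that bookkeeping (the helper name is hypothetical):

	#include "stmmac.h"

	/* Frames the host has consumed whose descriptors are not yet
	 * refilled; unsigned arithmetic keeps this correct across wrap. */
	static inline unsigned int stmmac_rx_pending_sketch(struct stmmac_priv *priv)
	{
		return priv->cur_rx - priv->dirty_rx;
	}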
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac_main.c linux-3.0.101.xm510/drivers/net/xmmac/stmmac_main.c
|
||
--- linux-3.0.101/drivers/net/xmmac/stmmac_main.c 1970-01-01 03:00:00.000000000 +0300
|
||
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac_main.c 2017-09-11 14:47:37.000000000 +0300
|
||
@@ -0,0 +1,1983 @@
|
||
+/*
|
||
+ * Author: ZengChuanJie
|
||
+ */
|
||
+#include <linux/module.h>
|
||
+#include <linux/init.h>
|
||
+#include <linux/kernel.h>
|
||
+#include <linux/interrupt.h>
|
||
+#include <linux/etherdevice.h>
|
||
+#include <linux/platform_device.h>
|
||
+#include <linux/ip.h>
|
||
+#include <linux/tcp.h>
|
||
+#include <linux/skbuff.h>
|
||
+#include <linux/ethtool.h>
|
||
+#include <linux/if_ether.h>
|
||
+#include <linux/crc32.h>
|
||
+#include <linux/mii.h>
|
||
+#include <linux/phy.h>
|
||
+#include <linux/if_vlan.h>
|
||
+#include <linux/dma-mapping.h>
|
||
+#include <linux/slab.h>
|
||
+#include <linux/prefetch.h>
|
||
+#include "stmmac.h"
|
||
+
|
||
+#include <mach/irqs.h>
|
||
+#include <mach/hardware.h>
|
||
+
|
||
+#include <linux/phy.h>
|
||
+
|
||
+#define STMMAC_RESOURCE_NAME "xmmaceth"
|
||
+#define PHY_RESOURCE_NAME "xmmacphy"
|
||
+
|
||
+#undef STMMAC_DEBUG
|
||
+/*#define STMMAC_DEBUG*/
|
||
+#ifdef STMMAC_DEBUG
|
||
+#define DBG(nlevel, klevel, fmt, args...) \
|
||
+ ((void)(netif_msg_##nlevel(priv) && \
|
||
+ printk(KERN_##klevel fmt, ## args)))
|
||
+#else
|
||
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
|
||
+#endif
|
||
+
|
||
+#undef STMMAC_RX_DEBUG
|
||
+/*#define STMMAC_RX_DEBUG*/
|
||
+#ifdef STMMAC_RX_DEBUG
|
||
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
|
||
+#else
|
||
+#define RX_DBG(fmt, args...) do { } while (0)
|
||
+#endif
|
||
+
|
||
+#undef STMMAC_XMIT_DEBUG
|
||
+/*#define STMMAC_XMIT_DEBUG*/
|
||
+#ifdef STMMAC_TX_DEBUG
|
||
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
|
||
+#else
|
||
+#define TX_DBG(fmt, args...) do { } while (0)
|
||
+#endif
|
||
+
|
||
+#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
|
||
+#define JUMBO_LEN 9000
|
||
+
|
||
+/* Module parameters */
|
||
+#define TX_TIMEO 5000 /* default 5 seconds */
|
||
+static int watchdog = TX_TIMEO;
|
||
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
|
||
+
|
||
+static int debug = -1; /* -1: default, 0: no output, 16: all */
|
||
+module_param(debug, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
|
||
+
|
||
+static int phyaddr = -1;
|
||
+module_param(phyaddr, int, S_IRUGO);
|
||
+MODULE_PARM_DESC(phyaddr, "Physical device address");
|
||
+
|
||
+#define DMA_TX_SIZE 256
|
||
+static int dma_txsize = DMA_TX_SIZE;
|
||
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
|
||
+
|
||
+#define DMA_RX_SIZE 256
|
||
+static int dma_rxsize = DMA_RX_SIZE;
|
||
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
|
||
+
|
||
+//static int flow_ctrl = FLOW_OFF;
|
||
+static int flow_ctrl = FLOW_AUTO; /* 默认开启流控 */
|
||
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
|
||
+
|
||
+static int pause = PAUSE_TIME;
|
||
+module_param(pause, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
|
||
+
|
||
+#define TC_DEFAULT 64 /* 当发送缓冲区达到一个阈值值时开始发送数据,使用默认值 */
|
||
+static int tc = TC_DEFAULT;
|
||
+module_param(tc, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(tc, "DMA threshold control value");
|
||
+
|
||
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
|
||
+static int buf_sz = DMA_BUFFER_SIZE;
|
||
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
|
||
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
|
||
+
|
||
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
|
||
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
|
||
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
|
||
+
|
||
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
|
||
+
|
||
+
|
||
+#define EDIT_ZENGCHUANJIE 1
|
||
+
|
||
+/**
|
||
+ * stmmac_verify_args - verify the driver parameters.
|
||
+ * Description: it verifies if some wrong parameter is passed to the driver.
|
||
+ * Note that wrong parameters are replaced with the default values.
|
||
+ */
|
||
+static void stmmac_verify_args(void)
|
||
+{
|
||
+ if (unlikely(watchdog < 0))
|
||
+ watchdog = TX_TIMEO;
|
||
+ if (unlikely(dma_rxsize < 0))
|
||
+ dma_rxsize = DMA_RX_SIZE;
|
||
+ if (unlikely(dma_txsize < 0))
|
||
+ dma_txsize = DMA_TX_SIZE;
|
||
+ if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
|
||
+ buf_sz = DMA_BUFFER_SIZE;
|
||
+ if (unlikely(flow_ctrl > 1))
|
||
+ flow_ctrl = FLOW_AUTO;
|
||
+ else if (likely(flow_ctrl < 0))
|
||
+ flow_ctrl = FLOW_OFF;
|
||
+ if (unlikely((pause < 0) || (pause > 0xffff)))
|
||
+ pause = PAUSE_TIME;
|
||
+}
|
||
+
|
||
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
|
||
+static void print_pkt(unsigned char *buf, int len)
|
||
+{
|
||
+ int j;
|
||
+ pr_info("len = %d byte, buf addr: 0x%p", len, buf);
|
||
+ for (j = 0; j < len; j++) {
|
||
+ if ((j % 16) == 0)
|
||
+ pr_info("\n %03x:", j);
|
||
+ pr_info(" %02x", buf[j]);
|
||
+ }
|
||
+ pr_info("\n");
|
||
+}
|
||
+#endif
|
||
+
|
||
+/* minimum number of free TX descriptors required to wake up TX process */
|
||
+#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
|
||
+
|
||
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
|
||
+{
|
||
+ /* cur_tx是指最后一个已经传送的的位置 */
|
||
+ //return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
|
||
+ return priv->dirty_tx + priv->dma_tx_size - (priv->cur_tx+1);
|
||
+}
|
||
+
|
||
+/* On some ST platforms, some HW system configuraton registers have to be
|
||
+ * set according to the link speed negotiated.
|
||
+ */
|
||
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
|
||
+{
|
||
+ struct phy_device *phydev = priv->phydev;
|
||
+
|
||
+ if (likely(priv->plat->fix_mac_speed))
|
||
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv,
|
||
+ phydev->speed);
|
||
+}
|
||
+
|
||
+/**
|
||
+ * stmmac_adjust_link
|
||
+ * @dev: net device structure
|
||
+ * Description: it adjusts the link parameters.
|
||
+ */
|
||
+static void stmmac_adjust_link(struct net_device *dev)
|
||
+{
|
||
+ struct stmmac_priv *priv = netdev_priv(dev);
|
||
+ struct phy_device *phydev = priv->phydev;
|
||
+ unsigned long flags;
|
||
+ int new_state = 0;
|
||
+ unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
|
||
+
|
||
+ if (phydev == NULL)
|
||
+ return;
|
||
+
|
||
+ DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
|
||
+ phydev->addr, phydev->link);
|
||
+
|
||
+ spin_lock_irqsave(&priv->lock, flags);
|
||
+ if (phydev->link) {
|
||
+ u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
|
||
+
|
||
+ /* Now we make sure that we can be in full duplex mode.
|
||
+ * If not, we operate in half-duplex mode. */
|
||
+ if (phydev->duplex != priv->oldduplex) {
|
||
+ new_state = 1;
|
||
+ if (!(phydev->duplex))
|
||
+ //ctrl &= ~priv->hw->link.duplex;
|
||
+ ctrl &= ~(1<<11);
|
||
+ else
|
||
+ //ctrl |= priv->hw->link.duplex;
|
||
+ ctrl |= 1<<11;
|
||
+ priv->oldduplex = phydev->duplex;
|
||
+ }
|
||
+ /* Flow Control operation */
|
||
+ //if (phydev->pause)
|
||
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
|
||
+ fc, pause_time);
|
||
+
|
||
+ //printk("-----ctrl:%x\n", ctrl);
|
||
+ if (phydev->speed != priv->speed) {
|
||
+ new_state = 1;
|
||
+ switch (phydev->speed) {
|
||
+ case 1000:
|
||
+ if (likely(priv->plat->has_gmac))
|
||
+ ctrl &= ~priv->hw->link.port;
|
||
+ stmmac_hw_fix_mac_speed(priv);
|
||
+ break;
|
||
+ case 100:
|
||
+ {
|
||
+ unsigned int tmp = 0;
|
||
+ tmp = readl(IO_ADDRESS(GPIO_BASE+0xd0));
|
||
+ tmp |= 0x1;
|
||
+ writel(tmp, IO_ADDRESS(GPIO_BASE+0xd0));
|
||
+ /* 14 此位可通过mac_speed_o来设置获取, 0x100200d0 */
|
||
+ //ctrl |= 1<<14;
|
||
+ break;
|
||
+ }
|
||
+ case 10:
|
||
+ /*
|
||
+ if (priv->plat->has_gmac) {
|
||
+ ctrl |= priv->hw->link.port;
|
||
+ if (phydev->speed == SPEED_100) {
|
||
+ ctrl |= priv->hw->link.speed;
|
||
+ } else {
|
||
+ ctrl &= ~(priv->hw->link.speed);
|
||
+ }
|
||
+ } else {
|
||
+ ctrl &= ~priv->hw->link.port;
|
||
+ }
|
||
+ stmmac_hw_fix_mac_speed(priv);
|
||
+ */
|
||
+ //ctrl &= ~(1<<14);
|
||
+ {
|
||
+ /* 网络退化为10M,或者10M网络时使用 */
|
||
+ unsigned int tmp = 0;
|
||
+ tmp = readl(IO_ADDRESS(GPIO_BASE+0xd0));
|
||
+ tmp &= ~(0x1);
|
||
+ writel(tmp, IO_ADDRESS(GPIO_BASE+0xd0));
|
||
+ break;
|
||
+ }
|
||
+ default:
|
||
+ if (netif_msg_link(priv))
|
||
+ pr_warning("%s: Speed (%d) is not 10"
|
||
+ " or 100!\n", dev->name, phydev->speed);
|
||
+ break;
|
||
+ }
|
||
+
|
||
+ priv->speed = phydev->speed;
|
||
+ }
|
||
+ //printk(">>>>>>ctrl:%x\n", ctrl);
|
||
+
|
||
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
|
||
+
|
||
+ if (!priv->oldlink) {
|
||
+ new_state = 1;
|
||
+ priv->oldlink = 1;
|
||
+ }
|
||
+ } else if (priv->oldlink) {
|
||
+ new_state = 1;
|
||
+ priv->oldlink = 0;
|
||
+ priv->speed = 0;
|
||
+ priv->oldduplex = -1;
|
||
+ }
|
||
+
|
||
+ if (new_state && netif_msg_link(priv))
|
||
+ phy_print_status(phydev);
|
||
+
|
||
+ spin_unlock_irqrestore(&priv->lock, flags);
|
||
+
|
||
+ DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
|
||
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+
+	priv->oldlink = 0;
+	priv->speed = 0;
+	priv->oldduplex = -1;
+
+	if (priv->phy_addr == -1) {
+		/* We don't have a PHY, so do nothing */
+		return 0;
+	}
+
+	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+		 priv->phy_addr);
+	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+
+	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
+			     priv->phy_interface);
+
+	if (IS_ERR(phydev)) {
+		pr_err("%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/*
+	 * Broken HW is sometimes missing the pull-up resistor on the
+	 * MDIO line, which results in reads to non-existent devices returning
+	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+	 * device as well.
+	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
+	 */
+	if (phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
+		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+	priv->phydev = phydev;
+
+	return 0;
+}
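+/*
+ * For reference: assuming the usual PHY_ID_FMT of "%s:%02x" on this kernel,
+ * the snprintf() pair above builds ids such as "0:01" (bus 0, PHY address 1),
+ * which is the string phy_connect() matches against the registered MDIO bus.
+ */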
+
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/**
+ * display_ring
+ * @p: pointer to the ring.
+ * @size: size of the ring.
+ * Description: display all the descriptors within the ring.
+ */
+static void display_ring(struct dma_desc *p, int size)
+{
+	struct tmp_s {
+		u64 a;
+		unsigned int b;
+		unsigned int c;
+	};
+	int i;
+	for (i = 0; i < size; i++) {
+		struct tmp_s *x = (struct tmp_s *)(p + i);
+		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+			i, (unsigned int)virt_to_phys(&p[i]),
+			(unsigned int)(x->a), (unsigned int)((x->a) >> 32),
+			x->b, x->c);
+		pr_info("\n");
+	}
+}
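+/*
+ * Note: struct tmp_s above just overlays the 16-byte dma_desc so that DES0
+ * and DES1 can be printed from the low/high halves of the u64, and the two
+ * buffer pointers from b/c, without depending on the descriptor bitfields.
+ */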
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+	int i;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+	unsigned int bfsize = priv->dma_buf_sz;
+	int buff2_needed = 0, dis_ic = 0;
+
+	/* Set the Buffer size according to the MTU;
+	 * indeed, in case of jumbo we need to bump-up the buffer sizes.
+	 */
+	if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
+		bfsize = BUF_SIZE_16KiB;
+	else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
+		bfsize = BUF_SIZE_8KiB;
+	else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
+		bfsize = BUF_SIZE_4KiB;
+	else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
+		bfsize = BUF_SIZE_2KiB;
+	else
+		bfsize = DMA_BUFFER_SIZE;
+
+	/* If the MTU exceeds 8k, use the second buffer in the chain */
+	if (bfsize >= BUF_SIZE_8KiB)
+		buff2_needed = 1;
+
+	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+	    txsize, rxsize, bfsize);
+
+	priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+	priv->rx_skbuff =
+	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+	priv->dma_rx =
+	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+						  rxsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_rx_phy,
+						  GFP_KERNEL);
+	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+				  GFP_KERNEL);
+	priv->dma_tx =
+	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+						  txsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_tx_phy,
+						  GFP_KERNEL);
+
+	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+		return;
+	}
+
+	DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+	    dev->name, priv->dma_rx, priv->dma_tx,
+	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+	/* RX INITIALIZATION */
+	DBG(probe, INFO, "stmmac: SKB addresses:\n"
+	    "skb\t\tskb data\tdma data\n");
+
+	for (i = 0; i < rxsize; i++) {
+		struct dma_desc *p = priv->dma_rx + i;
+
+		skb = netdev_alloc_skb_ip_align(dev, bfsize);
+		if (unlikely(skb == NULL)) {
+			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+			break;
+		}
+		priv->rx_skbuff[i] = skb;
+		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+							bfsize, DMA_FROM_DEVICE);
+
+		p->des2 = priv->rx_skbuff_dma[i];
+		/* ring mode, i.e. a circular descriptor queue */
+		if (unlikely(buff2_needed))
+			p->des3 = p->des2 + BUF_SIZE_8KiB;
+		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+	}
+	priv->cur_rx = 0;
+	priv->dirty_rx = (unsigned int)(i - rxsize);
+	priv->dma_buf_sz = bfsize;
+	buf_sz = bfsize;
+
+	/* TX INITIALIZATION */
+	for (i = 0; i < txsize; i++) {
+		priv->tx_skbuff[i] = NULL;
+		priv->dma_tx[i].des2 = 0;
+	}
+	priv->dirty_tx = 0;
+	priv->cur_tx = 0;
+
+	/* Clear the Rx/Tx descriptors */
+	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+
+	if (netif_msg_hw(priv)) {
+		pr_info("RX descriptor ring:\n");
+		display_ring(priv->dma_rx, rxsize);
+		pr_info("TX descriptor ring:\n");
+		display_ring(priv->dma_tx, txsize);
+	}
+}
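+/*
+ * Worked example of the bfsize ladder above: with the default MTU of 1500
+ * and the usual DMA_BUFFER_SIZE of 2KiB (assumed), none of the jumbo
+ * thresholds match, so bfsize stays at DMA_BUFFER_SIZE and the second buffer
+ * pointer (des3) is never used.
+ */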
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->dma_rx_size; i++) {
+		if (priv->rx_skbuff[i]) {
+			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+					 priv->dma_buf_sz, DMA_FROM_DEVICE);
+			dev_kfree_skb_any(priv->rx_skbuff[i]);
+		}
+		priv->rx_skbuff[i] = NULL;
+	}
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		if (priv->tx_skbuff[i] != NULL) {
+			struct dma_desc *p = priv->dma_tx + i;
+			if (p->des2)
+				dma_unmap_single(priv->device, p->des2,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
+			/* dev_kfree_skb_any is safe in both interrupt and
+			 * non-interrupt context */
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
+			priv->tx_skbuff[i] = NULL;
+		}
+	}
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the DMA TX/RX socket buffers */
+	dma_free_rx_skbufs(priv);
+	dma_free_tx_skbufs(priv);
+
+	/* Free the region of consistent memory previously allocated for
+	 * the DMA */
+	dma_free_coherent(priv->device,
+			  priv->dma_tx_size * sizeof(struct dma_desc),
+			  priv->dma_tx, priv->dma_tx_phy);
+	dma_free_coherent(priv->device,
+			  priv->dma_rx_size * sizeof(struct dma_desc),
+			  priv->dma_rx, priv->dma_rx_phy);
+	kfree(priv->rx_skbuff_dma);
+	kfree(priv->rx_skbuff);
+	kfree(priv->tx_skbuff);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv : pointer to the private device structure.
+ * Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ * or Store-And-Forward capability.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+	if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
+		/* In case of GMAC, SF mode has to be enabled
+		 * to perform the TX COE. This depends on:
+		 * 1) TX COE is actually supported
+		 * 2) There is no bugged Jumbo frame support
+		 * that needs to not insert csum in the TDES.
+		 */
+		priv->hw->dma->dma_mode(priv->ioaddr,
+					SF_DMA_MODE, SF_DMA_MODE);
+		tc = SF_DMA_MODE;
+	} else
+		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+}
+
+/**
+ * stmmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx(struct stmmac_priv *priv)
+{
+	unsigned int txsize = priv->dma_tx_size;
+
+	while (priv->dirty_tx != priv->cur_tx) {
+		int last;
+		unsigned int entry = priv->dirty_tx % txsize;
+		struct sk_buff *skb = priv->tx_skbuff[entry];
+		struct dma_desc *p = priv->dma_tx + entry;
+
+		/* Check if the descriptor is owned by the DMA. */
+		if (priv->hw->desc->get_tx_owner(p))
+			break;
+
+		/* Verify tx error by looking at the last segment */
+		last = priv->hw->desc->get_tx_ls(p);
+		if (likely(last)) {
+			int tx_error =
+			    priv->hw->desc->tx_status(&priv->dev->stats,
+						      &priv->xstats, p,
+						      priv->ioaddr);
+			if (likely(tx_error == 0)) {
+				priv->dev->stats.tx_packets++;
+				priv->xstats.tx_pkt_n++;
+			} else
+				priv->dev->stats.tx_errors++;
+		}
+		TX_DBG("%s: curr %d, dirty %d\n", __func__,
+		       priv->cur_tx, priv->dirty_tx);
+
+		if (likely(p->des2))
+			dma_unmap_single(priv->device, p->des2,
+					 priv->hw->desc->get_tx_len(p),
+					 DMA_TO_DEVICE);
+		if (unlikely(p->des3))
+			p->des3 = 0;
+
+		if (likely(skb != NULL)) {
+			/*
+			 * If there's room in the queue (limit it to size)
+			 * we add this skb back into the pool,
+			 * if it's the right size.
+			 */
+			if ((skb_queue_len(&priv->rx_recycle) <
+				priv->dma_rx_size) &&
+				skb_recycle_check(skb, priv->dma_buf_sz))
+				__skb_queue_head(&priv->rx_recycle, skb);
+			else
+				dev_kfree_skb(skb);
+
+			priv->tx_skbuff[entry] = NULL;
+		}
+
+		priv->hw->desc->release_tx_desc(p);
+
+		entry = (++priv->dirty_tx) % txsize;
+	}
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+		netif_tx_lock(priv->dev);
+		if (netif_queue_stopped(priv->dev) &&
+		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+			TX_DBG("%s: restart transmit\n", __func__);
+			netif_wake_queue(priv->dev);
+		}
+		netif_tx_unlock(priv->dev);
+	}
+}
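+/*
+ * cur_tx and dirty_tx above are free-running counters: descriptors in
+ * [dirty_tx, cur_tx) are in flight, and both are reduced modulo txsize only
+ * when used as a ring index, so the while condition stays correct across
+ * counter wrap-around.
+ */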
+
+static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+{
+	priv->hw->dma->enable_dma_irq(priv->ioaddr);
+}
+
+static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+{
+	priv->hw->dma->disable_dma_irq(priv->ioaddr);
+}
+
+static int stmmac_has_work(struct stmmac_priv *priv)
+{
+	unsigned int has_work = 0;
+	int rxret, tx_work = 0;
+
+	rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
+		(priv->cur_rx % priv->dma_rx_size));
+
+	if (priv->dirty_tx != priv->cur_tx)
+		tx_work = 1;
+
+	if (likely(!rxret || tx_work))
+		has_work = 1;
+
+	return has_work;
+}
+
+static inline void _stmmac_schedule(struct stmmac_priv *priv)
+{
+	if (likely(stmmac_has_work(priv))) {
+		stmmac_disable_irq(priv);
+		napi_schedule(&priv->napi);
+	}
+}
+
+/**
+ * stmmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+	netif_stop_queue(priv->dev);
+
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	dma_free_tx_skbufs(priv);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+	priv->dirty_tx = 0;
+	priv->cur_tx = 0;
+	priv->hw->dma->start_tx(priv->ioaddr);
+
+	priv->dev->stats.tx_errors++;
+	netif_wake_queue(priv->dev);
+}
+
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+	int status;
+
+	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
+	if (likely(status == handle_tx_rx)) {
+		//printk("tx_rx.\n");
+		_stmmac_schedule(priv);
+	} else if (unlikely(status == tx_hard_error_bump_tc)) {
+		/* Try to bump up the dma threshold on this failure */
+		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+			tc += 64;
+			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+			priv->xstats.threshold = tc;
+		}
+	} else if (unlikely(status == tx_hard_error))
+		stmmac_tx_err(priv);
+}
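+/*
+ * Note on the threshold handling above: a tx_hard_error_bump_tc status raises
+ * the TX threshold tc in steps of 64 (while tc <= 256 and threshold mode is
+ * in use), whereas a plain tx_hard_error triggers the full ring cleanup and
+ * restart in stmmac_tx_err().
+ */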
+
+#ifndef GPIO_BASE
+	#define GPIO_BASE		(0x10020000)
+	#define GPIO78_MULT_USE_EN	(0x1c)
+#endif
+
+void gpio8_reset_phy(void)
+{
+	unsigned int gpio8;
+	void __iomem *gpio8_addr = NULL;
+	int ret = 0;
+
+	gpio8 = GPIO_BASE + 0xc0;
+	if (!request_mem_region(gpio8, 12, "gpio8")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio8);
+		return;
+	}
+	gpio8_addr = ioremap(gpio8, 4);
+	if (!gpio8_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	/* enable GPIO8_1 as an output */
+	writel(0x2, gpio8_addr + 0x4);
+
+	/* drive low */
+	writel(0x0, gpio8_addr + 0x0);
+
+	/* wait 100 ms */
+	mdelay(100);
+
+	/* drive high */
+	writel(0x2, gpio8_addr + 0x0);
+
+	mdelay(100);
+
+	iounmap(gpio8_addr);
+
+out_release_region:
+	release_mem_region(gpio8, 12);
+	return;
+}
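+/*
+ * The reset above assumes the GPIO8 bank at GPIO_BASE+0xc0 with the data
+ * register at +0x0 and the output-enable register at +0x4 (bit 1 = GPIO8_1),
+ * and pulses the PHY reset line low for 100 ms and then high for 100 ms
+ * before the MDIO bus is touched.
+ */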
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	//printk(KERN_EMERG" Get In stmmac_open.\n");
+
+	/* Check that the MAC address is valid. If it is not, refuse
+	 * to bring the device up. The user must specify an
+	 * address using the following linux command:
+	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+		pr_warning("%s: generated random MAC address %pM\n", dev->name,
+			   dev->dev_addr);
+	}
+
+	gpio8_reset_phy();
+
+	stmmac_verify_args();
+
+	ret = stmmac_init_phy(dev);
+	if (unlikely(ret)) {
+		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+		goto open_error;
+	}
+	if (priv->phydev && (priv->phydev->phy_id == 0x02430c54)) {
+		priv->mii->write(priv->phydev->bus, priv->phydev->addr, 26, 0x4924);
+	}
+
+	/* Create and initialize the TX/RX descriptors chains. */
+	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+	init_dma_desc_rings(dev);
+
+	/* DMA initialization and SW reset */
+	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+				  priv->dma_tx_phy, priv->dma_rx_phy);
+	if (ret < 0) {
+		pr_err("%s: DMA initialization failed\n", __func__);
+		goto open_error;
+	}
+
+	/* Copy the MAC addr into the HW */
+	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+	/* If required, perform hw setup of the bus. */
+	if (priv->plat->bus_setup)
+		priv->plat->bus_setup(priv->ioaddr);
+	/* Initialize the MAC Core */
+	priv->hw->mac->core_init(priv->ioaddr);
+
+	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+	if (priv->rx_coe)
+		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
+	if (priv->plat->tx_coe)
+		pr_info("\tTX Checksum insertion supported\n");
+	netdev_update_features(dev);
+
+	/* Initialise the MMC (if present) to disable all interrupts. */
+	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
+	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+
+	/* Request the IRQ lines */
+	ret = request_irq(dev->irq, stmmac_interrupt,
+			  IRQF_SHARED, dev->name, dev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+		       __func__, dev->irq, ret);
+		goto open_error;
+	}
+
+	/* Enable the MAC Rx/Tx */
+	stmmac_enable_mac(priv->ioaddr);
+
+	/* Set the HW DMA mode and the COE */
+	stmmac_dma_operation_mode(priv);
+
+	/* Extra statistics */
+	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+	priv->xstats.threshold = tc;
+
+	/* Start the ball rolling... */
+	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
+
+	/* Dump DMA/MAC registers */
+	if (netif_msg_hw(priv)) {
+		priv->hw->mac->dump_regs(priv->ioaddr);
+		priv->hw->dma->dump_regs(priv->ioaddr);
+	}
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	napi_enable(&priv->napi);
+	skb_queue_head_init(&priv->rx_recycle);
+	netif_start_queue(dev);
+
+	return 0;
+
+open_error:
+	if (priv->phydev)
+		phy_disconnect(priv->phydev);
+
+	return ret;
+}
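+/*
+ * Bring-up order used by stmmac_open() above: PHY hard reset -> phy_connect
+ * -> descriptor rings -> DMA init/SW reset -> MAC address -> core init ->
+ * IRQ request -> MAC enable -> DMA operation mode -> start TX/RX DMA ->
+ * phy_start -> NAPI/queue enable. Failures fall through to open_error, which
+ * only disconnects the PHY.
+ */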
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	//printk(KERN_EMERG" Get In stmmac_release.\n");
+	/* Stop and disconnect the PHY */
+	if (priv->phydev) {
+		phy_stop(priv->phydev);
+		phy_disconnect(priv->phydev);
+		priv->phydev = NULL;
+	}
+
+	netif_stop_queue(dev);
+
+	napi_disable(&priv->napi);
+	skb_queue_purge(&priv->rx_recycle);
+
+	/* Free the IRQ lines */
+	free_irq(dev->irq, dev);
+
+	/* Stop TX/RX DMA and clear the descriptors */
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
+
+	/* Release and free the Rx/Tx resources */
+	free_dma_desc_resources(priv);
+
+	/* Disable the MAC Rx/Tx */
+	stmmac_disable_mac(priv->ioaddr);
+
+	netif_carrier_off(dev);
+
+	return 0;
+}
+
+/**
+ * stmmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int entry;
+	int i, csum_insertion = 0;
+	int nfrags = skb_shinfo(skb)->nr_frags;	/* number of scattered fragments to handle */
+	struct dma_desc *desc, *first;
+	unsigned int nopaged_len;
+
+	//printk(KERN_EMERG" Get In stmmac_xmit.\n");
+	/* not enough free descriptors to hold the buffers about to be sent */
+	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			/* This is a hard error, log it. */
+			pr_err("%s: BUG! Tx Ring full when queue awake\n",
+			       __func__);
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if ((skb->len > ETH_FRAME_LEN) || nfrags)
+		pr_info("stmmac xmit:\n"
+			"\tskb addr %p - len: %d - nopaged_len: %d\n"
+			"\tn_frags: %d - ip_summed: %d - %s gso\n",
+			skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+			!skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+	//csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+	/* without question, if the hardware cannot do the L4 checksum,
+	 * ip_summed will be CHECKSUM_UNNECESSARY */
+
+	desc = priv->dma_tx + entry;
+	first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+			 "\t\tn_frags: %d, ip_summed: %d\n",
+			 skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+	priv->tx_skbuff[entry] = skb;
+	/* only 100M is supported */
+#if 0
+	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
+		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+		desc = priv->dma_tx + entry;
+	} else {
+#endif
+	nopaged_len = skb_headlen(skb);
+	desc->des2 = dma_map_single(priv->device, skb->data,
+				    nopaged_len, DMA_TO_DEVICE);
+	priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+					csum_insertion);
+#if 0
+	}
+#endif
+
+	for (i = 0; i < nfrags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		int len = frag->size;
+
+		entry = (++priv->cur_tx) % txsize;
+		desc = priv->dma_tx + entry;
+
+		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+		desc->des2 = dma_map_page(priv->device, frag->page,
+					  frag->page_offset,
+					  len, DMA_TO_DEVICE);
+		priv->tx_skbuff[entry] = NULL;
+		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+		priv->hw->desc->set_tx_owner(desc);
+	}
+
+	/* Interrupt on completion only for the last segment */
+	/* the upper layer hands down several segments that all belong to the
+	 * same frame, so the last segment is the end of the frame */
+	priv->hw->desc->close_tx_desc(desc);
+
+	/* To avoid a race condition */
+	priv->hw->desc->set_tx_owner(first);
+
+	priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if (netif_msg_pktdata(priv)) {
+		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
+			"first=%p, nfrags=%d\n",
+			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
+			entry, first, nfrags);
+		display_ring(priv->dma_tx, txsize);
+		pr_info(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb->len);
+	}
+#endif
+	/*
+	 * The buffers prepared above are ready to be sent; before the next
+	 * transmission can be queued we must check how many free descriptors
+	 * remain.
+	 */
+	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+		TX_DBG("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(dev);
+	}
+
+	dev->stats.tx_bytes += skb->len;
+
+	/* issue the TX poll demand */
+	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+	return NETDEV_TX_OK;
+}
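+/*
+ * stmmac_tx_avail() (defined elsewhere in this file) is assumed to take the
+ * usual free-running-counter form, roughly:
+ *
+ *	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+ *
+ * which is why cur_tx above is only ever incremented, and reduced modulo
+ * txsize only when used as a ring index.
+ */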
+
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+	unsigned int rxsize = priv->dma_rx_size;
+	int bfsize = priv->dma_buf_sz;
+	struct dma_desc *p = priv->dma_rx;
+
+	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+		/* allocate replacements for the sk_buffs already consumed */
+		unsigned int entry = priv->dirty_rx % rxsize;
+		if (likely(priv->rx_skbuff[entry] == NULL)) {
+			struct sk_buff *skb;
+
+			/* on the TX path sk_buffs are recycled into rx_recycle */
+			/* interrupts are already disabled, so call __skb_dequeue
+			 * rather than skb_dequeue */
+			skb = __skb_dequeue(&priv->rx_recycle);
+			if (skb == NULL)
+				skb = netdev_alloc_skb_ip_align(priv->dev,
+								bfsize);
+
+			if (unlikely(skb == NULL))
+				break;
+
+			priv->rx_skbuff[entry] = skb;
+			priv->rx_skbuff_dma[entry] =
+			    dma_map_single(priv->device, skb->data, bfsize,
+					   DMA_FROM_DEVICE);
+
+			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
+			/*
+			if (unlikely(priv->plat->has_gmac)) {
+				if (bfsize >= BUF_SIZE_8KiB)
+					(p + entry)->des3 =
+					    (p + entry)->des2 + BUF_SIZE_8KiB;
+			}
+			*/
+			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+		}
+		priv->hw->desc->set_rx_owner(p + entry);
+	}
+}
+
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+	unsigned int rxsize = priv->dma_rx_size;
+	unsigned int entry = priv->cur_rx % rxsize;
+	unsigned int next_entry;
+	unsigned int count = 0;
+	struct dma_desc *p = priv->dma_rx + entry;
+	struct dma_desc *p_next;
+
+#ifdef STMMAC_RX_DEBUG
+	if (netif_msg_hw(priv)) {
+		pr_debug(">>> stmmac_rx: descriptor ring:\n");
+		display_ring(priv->dma_rx, rxsize);
+	}
+#endif
+	count = 0;
+	while (!priv->hw->desc->get_rx_owner(p)) {
+		int status;
+
+		if (count >= limit)
+			break;
+
+		count++;
+
+		next_entry = (++priv->cur_rx) % rxsize;
+		p_next = priv->dma_rx + next_entry;
+		prefetch(p_next);
+
+		//printk(KERN_EMERG"recved: ");
+		/* read the status of the incoming frame */
+		status = (priv->hw->desc->rx_status(&priv->dev->stats,
+						    &priv->xstats, p));
+		if (unlikely(status == discard_frame)) {
+			//printk(KERN_EMERG"discard_frame.\n");
+			priv->dev->stats.rx_errors++;
+		} else {
+			struct sk_buff *skb;
+			int frame_len;
+
+			frame_len = priv->hw->desc->get_rx_frame_len(p);
+			// printk(KERN_EMERG"frame_len:%d\n ", frame_len);
+			/*
+			for (i=0; i<frame_len; i++) {
+				printk(KERN_EMERG"%c ", skb->data[i]);
+			}
+			*/
+			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+			 * Type frames (LLC/LLC-SNAP) */
+#if !EDIT_ZENGCHUANJIE
+			/*
+			 * We only use Ethernet II framing (frame_len > 1536,
+			 * ethertypes 0x800/0x806/0x808) and never 802.3 frames.
+			 * With checksum offload disabled the FCS and its length
+			 * are dropped automatically, so frame_len would not need
+			 * the -4. See the GMAC databook p.418, Table 6-32
+			 * (IPCHKSUM_EN=0 && IPC_FULL_OFFLOAD=0).
+			 */
+			if (unlikely(status != llc_snap))
+				frame_len -= ETH_FCS_LEN;
+#endif
+#ifdef STMMAC_RX_DEBUG
+			if (frame_len > ETH_FRAME_LEN)
+				pr_debug("\tRX frame size %d, COE status: %d\n",
+					 frame_len, status);
+
+			if (netif_msg_hw(priv))
+				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+					 p, entry, p->des2);
+#endif
+			skb = priv->rx_skbuff[entry];
+			if (unlikely(!skb)) {
+				pr_err("%s: Inconsistent Rx descriptor chain\n",
+				       priv->dev->name);
+				priv->dev->stats.rx_dropped++;
+				break;
+			}
+			prefetch(skb->data - NET_IP_ALIGN);
+			priv->rx_skbuff[entry] = NULL;
+
+			skb_put(skb, frame_len);
+			/* if the buffer was cached, sync it so the CPU sees
+			 * the freshly DMA'd data */
+			dma_unmap_single(priv->device,
+					 priv->rx_skbuff_dma[entry],
+					 priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+			if (netif_msg_pktdata(priv)) {
+				pr_info(" frame received (%dbytes)", frame_len);
+				print_pkt(skb->data, frame_len);
+			}
+#endif
+			skb->protocol = eth_type_trans(skb, priv->dev);
+
+#if EDIT_ZENGCHUANJIE
+			/* always for the old mac 10/100 */
+			skb_checksum_none_assert(skb);
+			netif_receive_skb(skb);
+#else
+			/* the GMAC IP has neither L4 nor IPv4 checksum offload
+			 * enabled, so the status here is always CHECKSUM_NONE
+			 * and those checks must be done by the upper layers
+			 * (this keeps the driver simple)
+			 */
+			if (unlikely(status == csum_none)) {
+				/* always for the old mac 10/100 */
+				skb_checksum_none_assert(skb);
+				netif_receive_skb(skb);
+			} else {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				napi_gro_receive(&priv->napi, skb);
+			}
+#endif
+
+			priv->dev->stats.rx_packets++;
+			priv->dev->stats.rx_bytes += frame_len;
+		}
+		entry = next_entry;
+		p = p_next;	/* use prefetched values */
+	}
+
+	/*
+	 * On receive the sk_buffs are supplied by the driver; once handed to
+	 * the protocol handlers and consumed by the upper layers they are
+	 * freed, so matching sk_buffs have to be re-created here.
+	 */
+	stmmac_rx_refill(priv);
+
+	priv->xstats.rx_pkt_n += count;
+
+	return count;
+}
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ *	  all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * Also it runs the TX completion thread.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	int work_done = 0;
+
+	priv->xstats.poll_n++;
+	/* on a TX/RX interrupt, re-evaluate the driver's descriptor
+	 * availability and decide whether to re-enable transmission */
+	stmmac_tx(priv);
+	work_done = stmmac_rx(priv, budget);
+
+	if (work_done < budget) {
+		/* if fewer packets than the budget were received, reception is
+		 * complete: remove this napi from softnet_data's poll_list
+		 * (clearing the SCHED flag); the next interrupt will mark the
+		 * device as SCHED again
+		 */
+		napi_complete(napi);
+		stmmac_enable_irq(priv);
+	}
+	return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	//printk(KERN_EMERG" Get In stmmac_tx_timeout.\n");
+	/* Clear Tx resources and restart transmitting again */
+	stmmac_tx_err(priv);
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+	if (dev->flags & IFF_UP)	/* can't act on a running interface */
+		return -EBUSY;
+
+	//printk(KERN_EMERG" Get In stmmac_config.\n");
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != dev->base_addr) {
+		pr_warning("%s: can't change I/O address\n", dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != dev->irq) {
+		pr_warning("%s: can't change IRQ number %d\n",
+			   dev->name, dev->irq);
+		return -EOPNOTSUPP;
+	}
+
+	/* ignore other fields */
+	return 0;
+}
+
+/**
+ * stmmac_multicast_list - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_multicast_list(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	//printk(KERN_EMERG" Get In stmmac_multicast_list.\n");
+	spin_lock(&priv->lock);
+	priv->hw->mac->set_filter(dev);
+	spin_unlock(&priv->lock);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int max_mtu;
+
+	//printk(KERN_EMERG" Get In stmmac_change_mtu.\n");
+	if (netif_running(dev)) {
+		pr_err("%s: must be stopped to change its MTU\n", dev->name);
+		return -EBUSY;
+	}
+
+	if (priv->plat->has_gmac)
+		max_mtu = JUMBO_LEN;
+	else
+		max_mtu = ETH_DATA_LEN;
+
+	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+	netdev_update_features(dev);
+
+	return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	//printk(KERN_EMERG" Get In stmmac_fix_features.\n");
+	if (!priv->rx_coe)
+		features &= ~NETIF_F_RXCSUM;
+	if (!priv->plat->tx_coe)
+		features &= ~NETIF_F_ALL_CSUM;
+
+	/* Some GMAC devices have a bugged Jumbo frame support that
+	 * needs to have the Tx COE disabled for oversized frames
+	 * (due to limited buffer sizes). In this case we disable
+	 * the TX csum insertion in the TDES and do not use SF. */
+	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+		features &= ~NETIF_F_ALL_CSUM;
+
+	return features;
+}
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	//printk(KERN_EMERG"Get int interrupt.\n");
+	if (priv->plat->has_gmac)
+		/* To handle GMAC own interrupts */
+		priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+
+	stmmac_dma_interrupt(priv);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently there is no special functionality supported in IOCTL, just the
+ * phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	//printk(KERN_EMERG" Get In stmmac_ioctl.\n");
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!priv->phydev)
+		return -EINVAL;
+
+	spin_lock(&priv->lock);
+	ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+	spin_unlock(&priv->lock);
+
+	return ret;
+}
+
+static const struct net_device_ops stmmac_netdev_ops = {
+	.ndo_open = stmmac_open,
+	.ndo_start_xmit = stmmac_xmit,
+	.ndo_stop = stmmac_release,
+	.ndo_change_mtu = stmmac_change_mtu,
+	.ndo_fix_features = stmmac_fix_features,
+	.ndo_set_multicast_list = stmmac_multicast_list,
+	/* packets are queued on the TX ring first; this hook fires when an
+	 * entry in that queue has gone unhandled by the driver for too long */
+	.ndo_tx_timeout = stmmac_tx_timeout,
+	.ndo_do_ioctl = stmmac_ioctl,
+	.ndo_set_config = stmmac_config,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e. the DMA engine)
+ */
+static int stmmac_probe(struct net_device *dev)
+{
+	int ret = 0;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	ether_setup(dev);
+
+	dev->netdev_ops = &stmmac_netdev_ops;
+	stmmac_set_ethtool_ops(dev);
+
+	/*
+	 * IP header checksums are always computed in software; hw_features
+	 * never advertises IP header checksumming.
+	 *
+	 * NETIF_F_IP_CSUM means the hardware can compute the IPv4 L4 checksum.
+	 * NETIF_F_HW_CSUM means the hardware can compute the L4 checksum for
+	 * every protocol; it is not normally set for TCP/IP hardware.
+	 * NETIF_F_NO_CSUM means the hardware is reliable enough that no
+	 * checksum verification is needed at all; loopback devices only.
+	 */
+	//dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->hw_features = NETIF_F_SG;	/* no hardware L4 checksums needed for now */
+	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+	priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+	if (flow_ctrl)	/* flow control is on by default */
+		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
+
+	priv->pause = pause;
+	netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+
+	/* Get the MAC address */
+	priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+				     dev->dev_addr, 0);
+
+	if (!is_valid_ether_addr(dev->dev_addr))
+		pr_warning("\tno valid MAC address; "
+			   "please, use ifconfig or nwhwconfig!\n");
+
+	spin_lock_init(&priv->lock);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		pr_err("%s: ERROR %i registering the device\n",
+		       __func__, ret);
+		return -ENODEV;
+	}
+	//printk(KERN_EMERG"dev->ifindex:%d\n", dev->ifindex);
+
+	DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
+	    dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
+	    (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
+
+	return ret;
+}
+
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: select and initialise the mac device (mac100 or Gmac).
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	struct mac_device_info *device;
+
+	if (priv->plat->has_gmac) {
+		device = dwmac1000_setup(priv->ioaddr);
+	} else {
+		device = dwmac100_setup(priv->ioaddr);
+		//printk(KERN_EMERG"dwma100_setup.\n");
+	}
+
+	if (!device)
+		return -ENOMEM;
+
+	if (priv->plat->enh_desc) {
+		device->desc = &enh_desc_ops;
+		pr_info("\tEnhanced descriptor structure\n");
+	} else {
+		device->desc = &ndesc_ops;
+		//printk(KERN_EMERG"normal descriptor.\n");
+	}
+
+	priv->hw = device;
+
+	return 0;
+}
+
+static int stmmacphy_dvr_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
+
+	pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
+		 plat_dat->bus_id);
+	//printk(KERN_EMERG"stmmacphy_dvr_probe.\n");
+
+	return 0;
+}
+
+static int stmmacphy_dvr_remove(struct platform_device *pdev)
+{
+	//printk(KERN_EMERG"stmmacphy_dvr_remove.\n");
+	return 0;
+}
+
+static struct platform_driver stmmacphy_driver = {
+	.driver = {
+		.name = PHY_RESOURCE_NAME,
+	},
+	.probe = stmmacphy_dvr_probe,
+	.remove = stmmacphy_dvr_remove,
+};
+
+/**
+ * stmmac_associate_phy
+ * @dev: pointer to device structure
+ * @data: points to the private structure.
+ * Description: Scans through all the PHYs we have registered and checks if
+ * any are associated with our MAC. If so, then just fill in
+ * the blanks in our local context structure
+ */
+static int stmmac_associate_phy(struct device *dev, void *data)
+{
+	struct stmmac_priv *priv = (struct stmmac_priv *)data;
+	struct plat_stmmacphy_data *plat_dat = dev->platform_data;
+
+	DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
+	    plat_dat->bus_id);
+	//printk(KERN_EMERG"stmmac_associate_phy.\n");
+	//printk(KERN_EMERG"priv->plat->bus_id:%d\n", priv->plat->bus_id);
+	//printk(KERN_EMERG"plat_dat->bus_id:%d\n", plat_dat->bus_id);
+
+	/* Check that this phy is for the MAC being initialised */
+	if (priv->plat->bus_id != plat_dat->bus_id)
+		return 0;
+
+	/* OK, this PHY is connected to the MAC.
+	   Go ahead and get the parameters */
+	DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
+	/* this project does not use the PHY interrupt */
+	/*
+	priv->phy_irq =
+	    platform_get_irq_byname(to_platform_device(dev), "phyirq");
+	DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
+	    plat_dat->bus_id, priv->phy_irq);
+	*/
+
+	/* Override with kernel parameters if supplied XXX CRS XXX
+	 * this needs to have multiple instances */
+	if ((phyaddr >= 0) && (phyaddr <= 31))
+		plat_dat->phy_addr = phyaddr;
+
+	priv->phy_addr = plat_dat->phy_addr;
+	priv->phy_mask = plat_dat->phy_mask;
+	priv->phy_interface = plat_dat->interface;
+	priv->phy_reset = plat_dat->phy_reset;
+
+	DBG(probe, DEBUG, "%s: exiting\n", __func__);
+	return 1;	/* forces exit of driver_for_each_device() */
+}
+
+#ifndef GPIO_BASE
+	#define GPIO_BASE		(0x10020000)
+	#define GPIO78_MULT_USE_EN	(0x1c)
+#endif
+
+void gpio_7x8x_reuse_gmac(void)
+{
+	unsigned int gpio_7x8x_reuse, gpio_7x8x_reuse_value;
+	void __iomem *gpio_7x8x_reuse_addr = NULL;
+	int ret = 0;
+
+	gpio_7x8x_reuse = GPIO_BASE + GPIO78_MULT_USE_EN;
+	if (!request_mem_region(gpio_7x8x_reuse, 4, "gpio78")) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)gpio_7x8x_reuse);
+		return;
+	}
+	gpio_7x8x_reuse_addr = ioremap(gpio_7x8x_reuse, 4);
+	if (!gpio_7x8x_reuse_addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	gpio_7x8x_reuse_value = readl(gpio_7x8x_reuse_addr);
+
+	gpio_7x8x_reuse_value |= 0x01;
+
+	writel(gpio_7x8x_reuse_value, gpio_7x8x_reuse_addr);
+
+	iounmap(gpio_7x8x_reuse_addr);
+
+out_release_region:
+	release_mem_region(gpio_7x8x_reuse, 4);
+	return;
+}
+/**
+ * stmmac_dvr_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int stmmac_dvr_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	void __iomem *addr = NULL;
+	struct net_device *ndev = NULL;
+	struct stmmac_priv *priv = NULL;
+	struct plat_stmmacenet_data *plat_dat;
+
+#define GPIO_REUSE
+#ifdef GPIO_REUSE
+	gpio_7x8x_reuse_gmac();
+#endif
+
+	pr_info("XMMMAC driver:\n\tplatform registration... ");
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	//pr_info("res->start:%x\n",res->start);
+	pr_info("\tdone!\n");
+
+	if (!request_mem_region(res->start, resource_size(res),
+				pdev->name)) {
+		pr_err("%s: ERROR: memory allocation failed, "
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)res->start);
+		return -EBUSY;
+	}
+
+	addr = ioremap(res->start, resource_size(res));
+	if (!addr) {
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
+		ret = -ENOMEM;
+		goto out_release_region;
+	}
+
+	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+	if (!ndev) {
+		pr_err("%s: ERROR: allocating the device\n", __func__);
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	/* Get the MAC information */
+	ndev->irq = platform_get_irq_byname(pdev, "gmac_irq");
+	if (ndev->irq == -ENXIO) {
+		pr_err("%s: ERROR: MAC IRQ configuration "
+		       "information not found\n", __func__);
+		ret = -ENXIO;
+		goto out_free_ndev;
+	}
+	//pr_info("irqno:%x\n",ndev->irq);
+
+	priv = netdev_priv(ndev);
+	priv->device = &(pdev->dev);
+	priv->dev = ndev;
+	plat_dat = pdev->dev.platform_data;
+
+	priv->plat = plat_dat;
+
+	priv->ioaddr = addr;
+
+	platform_set_drvdata(pdev, ndev);
+
+	/* Set the I/O base addr */
+	ndev->base_addr = (unsigned long)addr;
+
+	/* Custom initialisation */
+	if (priv->plat->init) {
+		ret = priv->plat->init(pdev);
+		if (unlikely(ret))
+			goto out_free_ndev;
+	}
+
+	/* MAC HW device detection */
+	ret = stmmac_mac_device_setup(ndev);
+	if (ret < 0)
+		goto out_plat_exit;
+
+	/* Network Device Registration */
+	ret = stmmac_probe(ndev);
+	if (ret < 0)
+		goto out_plat_exit;
+
+	/* check that a PHY was registered with the system; this pairs with
+	 * stmmacphy_driver */
+	/* associate a PHY - it is provided by another platform bus */
+	if (!driver_for_each_device
+	    (&(stmmacphy_driver.driver), NULL, (void *)priv,
+	     stmmac_associate_phy)) {
+		pr_err("No PHY device is associated with this MAC!\n");
+		ret = -ENODEV;
+		goto out_unregister;
+	}
+
+	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+		"\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+		pdev->id, ndev->irq, addr);
+
+	/* MDIO bus Registration */
+	pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
+	ret = stmmac_mdio_register(ndev);
+	if (ret < 0)
+		goto out_unregister;
+	pr_debug("registered!\n");
+	return 0;
+
+out_unregister:
+	unregister_netdev(ndev);
+out_plat_exit:
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+out_free_ndev:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+out_unmap:
+	iounmap(addr);
+out_release_region:
+	release_mem_region(res->start, resource_size(res));
+
+	return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct resource *res;
+
+	pr_info("%s:\n\tremoving driver", __func__);
+
+	priv->hw->dma->stop_rx(priv->ioaddr);
+	priv->hw->dma->stop_tx(priv->ioaddr);
+
+	stmmac_disable_mac(priv->ioaddr);
+
+	netif_carrier_off(ndev);
+
+	stmmac_mdio_unregister(ndev);
+
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(ndev);
+
+	iounmap((void *)priv->ioaddr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops stmmac_pm_ops;
+
+static struct platform_driver stmmac_driver = {
+	.probe = stmmac_dvr_probe,
+	.remove = stmmac_dvr_remove,
+	.driver = {
+		.name = STMMAC_RESOURCE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &stmmac_pm_ops,
+	},
+};
+
+/* PLATFORM DEVICE */
+struct plat_stmmacenet_data xm510_gmac_platdata = {
+	0,	/* which bus the MAC hangs off; axi defaults to 0 and must match the PHY's bus_id, --0 */
+	16,	/* DMA bus burst length; the system allows at most 16, --16 */
+	0,	/* MDIO clock range; must stay within 1-2.5 MHz with a 50 MHz transfer clock, set to --0000 (slightly questionable?) */
+	0,	/* gigabit-1 / 100M-0; 100M is used, --0 */
+	0,	/* enhanced descriptors-1 / normal descriptors-0; normal descriptors are used, --0 */
+	0,	/* whether the GMAC inserts TX L4 checksums; unsupported for simplicity, --0 */
+	0,	/* jumbo frames; no gigabit, so unsupported for now, --0 */
+	0,	/* power-saving sleep mode, unsupported for now, --0 */
+	NULL,	/* speed fixup; autonegotiation only, the speed is decided in hardware, --NULL */
+	NULL,	/* unused for now, --NULL */
+	NULL,	/* platform-specific init hook, not needed, --NULL */
+	NULL,	/* not needed, --NULL */
+	NULL,	/* not needed, --NULL */
+	NULL	/* not needed, --NULL */
+};
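+/*
+ * The positional initializer above leans entirely on the field order of this
+ * driver's plat_stmmacenet_data. Assuming the layout implied by the comments
+ * (bus_id, pbl, clk_csr, has_gmac, enh_desc, tx_coe, bugged_jumbo, pmt,
+ * fix_mac_speed, bus_setup, init, exit, ...), a designated-initializer form
+ * would be self-documenting, e.g.:
+ *
+ *	struct plat_stmmacenet_data xm510_gmac_platdata = {
+ *		.bus_id  = 0,
+ *		.pbl     = 16,
+ *		.clk_csr = 0,
+ *	};
+ */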
+
+static struct resource xm510_gmac_resource[] = {
+	[0] = {
+		.name	= "gmac_addr",
+		.start	= GMAC_BASE,
+		.end	= GMAC_BASE + 0x10000 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name	= "gmac_irq",
+		.start	= GMAC_IRQ,
+		.end	= GMAC_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	}
+};
+static void xm510_gmac_release(struct device *dev)
+{
+	return;
+}
+struct platform_device xm510_gmac = {
+	.name		= STMMAC_RESOURCE_NAME,
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(xm510_gmac_resource),
+	.resource	= xm510_gmac_resource,
+	.dev		= {
+		.coherent_dma_mask = DMA_BIT_MASK(32),	/* otherwise dma_alloc_coherent would fail */
+		.platform_data = &xm510_gmac_platdata,
+		.release = xm510_gmac_release,
+	}
+};
+
+struct plat_stmmacphy_data xm510_phy_platdata = {
+	0,
+	1,
+	0,	/* phy_mask */
+	PHY_INTERFACE_MODE_RMII,
+	NULL,
+	NULL
+};
+
+static void xm510_phy_release(struct device *dev)
+{
+	return;
+}
+struct platform_device xm510_phy = {
+	.name	= PHY_RESOURCE_NAME,
+	.id	= -1,
+	.dev	= {
+		.platform_data = &xm510_phy_platdata,
+		.release = xm510_phy_release,
+	}
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+	int ret;
+
+	/* register the PHY first, otherwise the GMAC cannot detect it */
+	ret = platform_device_register(&xm510_phy);
+	if (ret) {
+		pr_err("No PHY device registered!\n");
+		return -ENODEV;
+	}
+	//printk(KERN_EMERG"phy device register done.\n");
+
+	if (platform_driver_register(&stmmacphy_driver)) {
+		pr_err("No PHY devices registered!\n");
+		return -ENODEV;
+	}
+	//printk(KERN_EMERG"phy driver register done.\n");
+
+	/* and mac */
+	ret = platform_driver_register(&stmmac_driver);
+	if (ret) {
+		pr_err("No MAC driver registered!\n");
+		return -ENODEV;
+	}
+	//printk(KERN_EMERG"MAC driver register done.\n");
+
+	ret = platform_device_register(&xm510_gmac);
+	if (ret) {
+		pr_err("No MAC device registered!\n");
+		return -ENODEV;
+	}
+	//printk(KERN_EMERG"MAC device register done.\n");
+
+	return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+	platform_driver_unregister(&stmmacphy_driver);
+	platform_device_unregister(&xm510_phy);
+	platform_driver_unregister(&stmmac_driver);
+	platform_device_unregister(&xm510_gmac);
+}
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+	char *opt;
+
+	if (!str || !*str)
+		return -EINVAL;
+	while ((opt = strsep(&str, ",")) != NULL) {
+		if (!strncmp(opt, "debug:", 6))
+			strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
+		else if (!strncmp(opt, "phyaddr:", 8))
+			strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
+		else if (!strncmp(opt, "dma_txsize:", 11))
+			strict_strtoul(opt + 11, 0,
+				       (unsigned long *)&dma_txsize);
+		else if (!strncmp(opt, "dma_rxsize:", 11))
+			strict_strtoul(opt + 11, 0,
+				       (unsigned long *)&dma_rxsize);
+		else if (!strncmp(opt, "buf_sz:", 7))
+			strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
+		else if (!strncmp(opt, "tc:", 3))
+			strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
+		else if (!strncmp(opt, "watchdog:", 9))
+			strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
+		else if (!strncmp(opt, "flow_ctrl:", 10))
+			strict_strtoul(opt + 10, 0,
+				       (unsigned long *)&flow_ctrl);
+		else if (!strncmp(opt, "pause:", 6))
+			strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
+	}
+	return 0;
+}
+
+/* at boot: if the kernel command line contains a stmmaceth= parameter, run
+ * the handler above (it is placed in the .init.setup section) */
+__setup("stmmaceth=", stmmac_cmdline_opt);
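+/*
+ * Example (matching the option names parsed above):
+ *	stmmaceth=debug:16,phyaddr:1,dma_rxsize:256,flow_ctrl:1
+ */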
+#endif
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("XMMAC 10/100/1000 Ethernet driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac_mdio.c linux-3.0.101.xm510/drivers/net/xmmac/stmmac_mdio.c
--- linux-3.0.101/drivers/net/xmmac/stmmac_mdio.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac_mdio.c	2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,222 @@
+/*******************************************************************************
+  STMMAC Ethernet Driver -- MDIO bus implementation
+  Provides Bus interface for MII registers
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Carl Shaw <carl.shaw@st.com>
+  Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned int mii_address = priv->hw->mii.addr;
+	unsigned int mii_data = priv->hw->mii.data;
+
+	int data;
+	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+			((phyreg << 6) & (0x000007C0)));
+	regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+
+	//printk(KERN_EMERG"regValue:%x\n", regValue);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	writel(regValue, priv->ioaddr + mii_address);
+	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	/* Read the data from the MII data register */
+	data = (int)readl(priv->ioaddr + mii_data);
+
+	//printk(KERN_EMERG"data:%x\n", data);
+
+	return data;
+}
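+/*
+ * The busy-wait loops above spin on MII_BUSY with no timeout; if the MDIO
+ * block ever wedges, the caller hangs. A bounded variant (a sketch, with a
+ * hypothetical iteration limit) would be:
+ *
+ *	int limit = 10000;
+ *
+ *	while (--limit && (readl(priv->ioaddr + mii_address) & MII_BUSY))
+ *		udelay(10);
+ *	if (!limit)
+ *		return -EBUSY;
+ */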
|
||
+
|
||
+/**
|
||
+ * stmmac_mdio_write
|
||
+ * @bus: points to the mii_bus structure
|
||
+ * @phyaddr: MII addr reg bits 15-11
|
||
+ * @phyreg: MII addr reg bits 10-6
|
||
+ * @phydata: phy data
|
||
+ * Description: it writes the data into the MII register from within the device.
|
||
+ */
|
||
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
|
||
+ u16 phydata)
|
||
+{
|
||
+ struct net_device *ndev = bus->priv;
|
||
+ struct stmmac_priv *priv = netdev_priv(ndev);
|
||
+ unsigned int mii_address = priv->hw->mii.addr;
|
||
+ unsigned int mii_data = priv->hw->mii.data;
|
||
+
|
||
+ u16 value =
|
||
+ (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
|
||
+ | MII_WRITE;
|
||
+
|
||
+ value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Set the MII address register to write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+
+ if (priv->phy_reset) {
+ pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ priv->phy_reset(priv->plat->bsp_priv);
+ }
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read.
+ */
+ writel(0, priv->ioaddr + mii_address);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+ int err = 0;
+ struct mii_bus *new_bus;
+ int *irqlist;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int addr, found;
+
+ new_bus = mdiobus_alloc();
+ if (new_bus == NULL)
+ return -ENOMEM;
+
+ irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (irqlist == NULL) {
+ err = -ENOMEM;
+ goto irqlist_alloc_fail;
+ }
+
+ /* Assign IRQ to phy at address phy_addr */
+ if (priv->phy_addr != -1)
+ irqlist[priv->phy_addr] = priv->phy_irq;
+
+ new_bus->name = "XMMMAC MII Bus";
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ new_bus->reset = &stmmac_mdio_reset;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+ new_bus->priv = ndev;
+ new_bus->irq = irqlist;
+ new_bus->phy_mask = priv->phy_mask;
+ new_bus->parent = priv->device;
+ err = mdiobus_register(new_bus);
+ if (err != 0) {
+ pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+ goto bus_register_fail;
+ }
+
+ priv->mii = new_bus;
+
+ found = 0;
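+ /* mdiobus_register() has already probed the bus; scan all 32 PHY
+  * addresses and adopt the first PHY found if none was preset.
+  */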
+ for (addr = 0; addr < 32; addr++) {
+ struct phy_device *phydev = new_bus->phy_map[addr];
+ if (phydev) {
+ // printk(KERN_EMERG"find %d\n", addr);
+ if (priv->phy_addr == -1) {
+ priv->phy_addr = addr;
+ phydev->irq = priv->phy_irq;
+ irqlist[addr] = priv->phy_irq;
+ }
+ pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
+ ndev->name, phydev->phy_id, addr,
+ phydev->irq, dev_name(&phydev->dev),
+ (addr == priv->phy_addr) ? " active" : "");
+ found = 1;
+ }
+ }
+
+ if (!found) {
+ pr_warning("%s: No PHY found\n", ndev->name);
+ } else {
+ //printk(KERN_EMERG"Found\n");
+ }
+
+ return 0;
+bus_register_fail:
+ kfree(irqlist);
+irqlist_alloc_fail:
+ kfree(new_bus);
+ return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ kfree(priv->mii);
+
+ return 0;
+}
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac_timer.c linux-3.0.101.xm510/drivers/net/xmmac/stmmac_timer.c
--- linux-3.0.101/drivers/net/xmmac/stmmac_timer.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac_timer.c 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ STMMAC external timer support.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include "stmmac_timer.h"
+
+static void stmmac_timer_handler(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ stmmac_schedule(dev);
+}
+
+#define STMMAC_TIMER_MSG(timer, freq) \
+printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
+
+#if defined(CONFIG_STMMAC_RTC_TIMER)
+#include <linux/rtc.h>
+static struct rtc_device *stmmac_rtc;
+static rtc_task_t stmmac_task;
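+/* The RTC periodic interrupt fires stmmac_timer_handler(), which kicks
+ * stmmac_schedule() for the net device, acting as an external timer.
+ */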
+
+static void stmmac_rtc_start(unsigned int new_freq)
+{
+ rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
+}
+
+static void stmmac_rtc_stop(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ stmmac_task.private_data = dev;
+ stmmac_task.func = stmmac_timer_handler;
+
+ stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (stmmac_rtc == NULL) {
+ pr_err("open rtc device failed\n");
+ return -ENODEV;
+ }
+
+ rtc_irq_register(stmmac_rtc, &stmmac_task);
+
+ /* Periodic mode is not supported */
+ if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
+ pr_err("set periodic failed\n");
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
+
+ tm->timer_start = stmmac_rtc_start;
+ tm->timer_stop = stmmac_rtc_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return 0;
+}
+
+#elif defined(CONFIG_STMMAC_TMU_TIMER)
+#include <linux/clk.h>
+#define TMU_CHANNEL "tmu2_clk"
+static struct clk *timer_clock;
+
+static void stmmac_tmu_start(unsigned int new_freq)
+{
+ clk_set_rate(timer_clock, new_freq);
+ clk_enable(timer_clock);
+}
+
+static void stmmac_tmu_stop(void)
+{
+ clk_disable(timer_clock);
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ timer_clock = clk_get(NULL, TMU_CHANNEL);
+
+ if (timer_clock == NULL)
+ return -1;
+
+ if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
+ timer_clock = NULL;
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG("TMU2", tm->freq);
+ tm->timer_start = stmmac_tmu_start;
+ tm->timer_stop = stmmac_tmu_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ clk_disable(timer_clock);
+ tmu2_unregister_user();
+ clk_put(timer_clock);
+ return 0;
+}
+#endif
diff -urN linux-3.0.101/drivers/net/xmmac/stmmac_timer.h linux-3.0.101.xm510/drivers/net/xmmac/stmmac_timer.h
--- linux-3.0.101/drivers/net/xmmac/stmmac_timer.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/drivers/net/xmmac/stmmac_timer.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,42 @@
+/*******************************************************************************
+ STMMAC external timer Header File.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+struct stmmac_timer {
+ void (*timer_start) (unsigned int new_freq);
+ void (*timer_stop) (void);
+ unsigned int freq;
+ unsigned int enable;
+};
+
+/* Open the HW timer device and return 0 in case of success */
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
+/* Stop the timer and release it */
+int stmmac_close_ext_timer(void);
+/* Function used for scheduling task within the stmmac */
+void stmmac_schedule(struct net_device *dev);
+
+#if defined(CONFIG_STMMAC_TMU_TIMER)
+extern int tmu2_register_user(void *fnt, void *data);
+extern void tmu2_unregister_user(void);
+#endif
diff -urN linux-3.0.101/include/linux/acpi_dma.h linux-3.0.101.xm510/include/linux/acpi_dma.h
--- linux-3.0.101/include/linux/acpi_dma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/acpi_dma.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,120 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of_dma.h
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_ACPI_DMA_H
+#define __LINUX_ACPI_DMA_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct acpi_dma_spec - slave device DMA resources
+ * @chan_id: channel unique id
+ * @slave_id: request line unique id
+ * @dev: struct device of the DMA controller to be used in the filter
+ * function
+ */
+struct acpi_dma_spec {
+ int chan_id;
+ int slave_id;
+ struct device *dev;
+};
+
+/**
+ * struct acpi_dma - representation of the registered DMAC
+ * @dma_controllers: linked list node
+ * @dev: struct device of this controller
+ * @acpi_dma_xlate: callback function to find a suitable channel
+ * @data: private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line: last supported request line (CSRT)
+ */
+struct acpi_dma {
+ struct list_head dma_controllers;
+ struct device *dev;
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *);
+ void *data;
+ unsigned short base_request_line;
+ unsigned short end_request_line;
+};
+
+/* Used with acpi_dma_simple_xlate() */
+struct acpi_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
+#ifdef CONFIG_DMA_ACPI
+
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+int acpi_dma_controller_free(struct device *dev);
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+void devm_acpi_dma_controller_free(struct device *dev);
+
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index);
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name);
+
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma);
+#else
+
+static inline int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int acpi_dma_controller_free(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline void devm_acpi_dma_controller_free(struct device *dev)
+{
+}
+
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
+ struct device *dev, size_t index)
+{
+ return NULL;
+}
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
+ struct device *dev, const char *name)
+{
+ return NULL;
+}
+
+#define acpi_dma_simple_xlate NULL
+
+#endif
+
+#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index
+
+#endif /* __LINUX_ACPI_DMA_H */
diff -urN linux-3.0.101/include/linux/amba/pl330.h linux-3.0.101.xm510/include/linux/amba/pl330.h
--- linux-3.0.101/include/linux/amba/pl330.h 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/amba/pl330.h 2016-05-17 09:52:17.000000000 +0300
@@ -13,6 +13,7 @@
#define __AMBA_PL330_H_

#include <asm/hardware/pl330.h>
+#include <linux/dmaengine.h>

struct dma_pl330_peri {
/*
@@ -42,4 +43,6 @@
unsigned mcbuf_sz;
};

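+/* dmaengine filter callback: pass to dma_request_channel() so only
+ * PL330-backed channels are selected. */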
+extern bool pl330_filter(struct dma_chan *chan, void *param);
+
#endif /* __AMBA_PL330_H_ */
diff -urN linux-3.0.101/include/linux/dma/ipu-dma.h linux-3.0.101.xm510/include/linux/dma/ipu-dma.h
--- linux-3.0.101/include/linux/dma/ipu-dma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/dma/ipu-dma.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DMA_IPU_DMA_H
+#define __LINUX_DMA_IPU_DMA_H
+
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+
+/* IPU DMA Controller channel definitions. */
+enum ipu_channel {
+ IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
+ IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
+ IDMAC_ADC_0 = 1,
+ IDMAC_IC_2 = 2,
+ IDMAC_ADC_1 = 2,
+ IDMAC_IC_3 = 3,
+ IDMAC_IC_4 = 4,
+ IDMAC_IC_5 = 5,
+ IDMAC_IC_6 = 6,
+ IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
+ IDMAC_IC_8 = 8,
+ IDMAC_IC_9 = 9,
+ IDMAC_IC_10 = 10,
+ IDMAC_IC_11 = 11,
+ IDMAC_IC_12 = 12,
+ IDMAC_IC_13 = 13,
+ IDMAC_SDC_0 = 14, /* Background synchronous display data */
+ IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
+ IDMAC_SDC_2 = 16,
+ IDMAC_SDC_3 = 17,
+ IDMAC_ADC_2 = 18,
+ IDMAC_ADC_3 = 19,
+ IDMAC_ADC_4 = 20,
+ IDMAC_ADC_5 = 21,
+ IDMAC_ADC_6 = 22,
+ IDMAC_ADC_7 = 23,
+ IDMAC_PF_0 = 24,
+ IDMAC_PF_1 = 25,
+ IDMAC_PF_2 = 26,
+ IDMAC_PF_3 = 27,
+ IDMAC_PF_4 = 28,
+ IDMAC_PF_5 = 29,
+ IDMAC_PF_6 = 30,
+ IDMAC_PF_7 = 31,
+};
+
+/* Order significant! */
+enum ipu_channel_status {
+ IPU_CHANNEL_FREE,
+ IPU_CHANNEL_INITIALIZED,
+ IPU_CHANNEL_READY,
+ IPU_CHANNEL_ENABLED,
+};
+
+#define IPU_CHANNELS_NUM 32
+
+enum pixel_fmt {
+ /* 1 byte */
+ IPU_PIX_FMT_GENERIC,
+ IPU_PIX_FMT_RGB332,
+ IPU_PIX_FMT_YUV420P,
+ IPU_PIX_FMT_YUV422P,
+ IPU_PIX_FMT_YUV420P2,
+ IPU_PIX_FMT_YVU422P,
+ /* 2 bytes */
+ IPU_PIX_FMT_RGB565,
+ IPU_PIX_FMT_RGB666,
+ IPU_PIX_FMT_BGR666,
+ IPU_PIX_FMT_YUYV,
+ IPU_PIX_FMT_UYVY,
+ /* 3 bytes */
+ IPU_PIX_FMT_RGB24,
+ IPU_PIX_FMT_BGR24,
+ /* 4 bytes */
+ IPU_PIX_FMT_GENERIC_32,
+ IPU_PIX_FMT_RGB32,
+ IPU_PIX_FMT_BGR32,
+ IPU_PIX_FMT_ABGR32,
+ IPU_PIX_FMT_BGRA32,
+ IPU_PIX_FMT_RGBA32,
+};
+
+enum ipu_color_space {
+ IPU_COLORSPACE_RGB,
+ IPU_COLORSPACE_YCBCR,
+ IPU_COLORSPACE_YUV
+};
+
+/*
+ * Enumeration of IPU rotation modes
+ */
+enum ipu_rotate_mode {
+ /* Note the enum values correspond to BAM value */
+ IPU_ROTATE_NONE = 0,
+ IPU_ROTATE_VERT_FLIP = 1,
+ IPU_ROTATE_HORIZ_FLIP = 2,
+ IPU_ROTATE_180 = 3,
+ IPU_ROTATE_90_RIGHT = 4,
+ IPU_ROTATE_90_RIGHT_VFLIP = 5,
+ IPU_ROTATE_90_RIGHT_HFLIP = 6,
+ IPU_ROTATE_90_LEFT = 7,
+};
+
+/*
+ * Enumeration of DI ports for ADC.
+ */
+enum display_port {
+ DISP0,
+ DISP1,
+ DISP2,
+ DISP3
+};
+
+struct idmac_video_param {
+ unsigned short in_width;
+ unsigned short in_height;
+ uint32_t in_pixel_fmt;
+ unsigned short out_width;
+ unsigned short out_height;
+ uint32_t out_pixel_fmt;
+ unsigned short out_stride;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ enum display_port disp;
+ unsigned short out_left;
+ unsigned short out_top;
+};
+
+/*
+ * Union of initialization parameters for a logical channel. So far only video
+ * parameters are used.
+ */
+union ipu_channel_param {
+ struct idmac_video_param video;
+};
+
+struct idmac_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct scatterlist *sg; /* scatterlist for this */
+ unsigned int sg_len; /* tx-descriptor. */
+ struct list_head list;
+};
+
+struct idmac_channel {
+ struct dma_chan dma_chan;
+ dma_cookie_t completed; /* last completed cookie */
+ union ipu_channel_param params;
+ enum ipu_channel link; /* input channel, linked to the output */
+ enum ipu_channel_status status;
+ void *client; /* Only one client per channel */
+ unsigned int n_tx_desc;
+ struct idmac_tx_desc *desc; /* allocated tx-descriptors */
+ struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
+ struct list_head free_list; /* free tx-descriptors */
+ struct list_head queue; /* queued tx-descriptors */
+ spinlock_t lock; /* protects sg[0,1], queue */
+ struct mutex chan_mutex; /* protects status, cookie, free_list */
+ bool sec_chan_en;
+ int active_buffer;
+ unsigned int eof_irq;
+ char eof_name[16]; /* EOF IRQ name for request_irq() */
+};
+
+#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
+#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
+
+#endif /* __LINUX_DMA_IPU_DMA_H */
diff -urN linux-3.0.101/include/linux/dma-buf.h linux-3.0.101.xm510/include/linux/dma-buf.h
--- linux-3.0.101/include/linux/dma-buf.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/dma-buf.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,199 @@
+/*
+ * Header file for dma buffer sharing framework.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_BUF_H__
+#define __DMA_BUF_H__
+
+#include <linux/file.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+
+struct device;
+struct dma_buf;
+struct dma_buf_attachment;
+
+/**
+ * struct dma_buf_ops - operations possible on struct dma_buf
+ * @attach: [optional] allows different devices to 'attach' themselves to the
+ * given buffer. It might return -EBUSY to signal that backing storage
+ * is already allocated and incompatible with the requirements
+ * of requesting device.
+ * @detach: [optional] detach a given device from this buffer.
+ * @map_dma_buf: returns list of scatter pages allocated, increases usecount
+ * of the buffer. Requires at least one attach to be called
+ * before. Returned sg list should already be mapped into
+ * _device_ address space. This call may sleep. May also return
+ * -EINTR. Should return -EINVAL if attach hasn't been called yet.
+ * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
+ * pages.
+ * @release: release this buffer; to be called after the last dma_buf_put.
+ * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
+ * caches and allocate backing storage (if not yet done)
+ * respectively pin the object into memory.
+ * @end_cpu_access: [optional] called after cpu access to flush caches.
+ * @kmap_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ * This callback must not sleep.
+ * @kmap: maps a page from the buffer into kernel address space.
+ * @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ * mapping needs to be coherent - if the exporter doesn't directly
+ * support this, it needs to fake coherency by shooting down any ptes
+ * when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
+ */
+struct dma_buf_ops {
+ int (*attach)(struct dma_buf *, struct device *,
+ struct dma_buf_attachment *);
+
+ void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
+
+ /* For {map,unmap}_dma_buf below, any specific buffer attributes
+ * required should get added to device_dma_parameters accessible
+ * via dev->dma_params.
+ */
+ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+ enum dma_data_direction);
+ void (*unmap_dma_buf)(struct dma_buf_attachment *,
+ struct sg_table *,
+ enum dma_data_direction);
+ /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
+ * if the call would block.
+ */
+
+ /* after final dma_buf_put() */
+ void (*release)(struct dma_buf *);
+
+ int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
+ enum dma_data_direction);
+ void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
+ enum dma_data_direction);
+ void *(*kmap_atomic)(struct dma_buf *, unsigned long);
+ void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
+ void *(*kmap)(struct dma_buf *, unsigned long);
+ void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+ void *(*vmap)(struct dma_buf *);
+ void (*vunmap)(struct dma_buf *, void *vaddr);
+};
+
+/**
+ * struct dma_buf - shared buffer object
+ * @size: size of the buffer
+ * @file: file pointer used for sharing buffers across, and for refcounting.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @ops: dma_buf_ops associated with this buffer object.
+ * @exp_name: name of the exporter; useful for debugging.
+ * @list_node: node for dma_buf accounting and debugging.
+ * @priv: exporter specific private data for this buffer object.
+ */
+struct dma_buf {
+ size_t size;
+ struct file *file;
+ struct list_head attachments;
+ const struct dma_buf_ops *ops;
+ /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
+ struct mutex lock;
+ unsigned vmapping_counter;
+ void *vmap_ptr;
+ const char *exp_name;
+ struct list_head list_node;
+ void *priv;
+};
+
+/**
+ * struct dma_buf_attachment - holds device-buffer attachment data
+ * @dmabuf: buffer for this attachment.
+ * @dev: device attached to the buffer.
+ * @node: list of dma_buf_attachment.
+ * @priv: exporter specific attachment data.
+ *
+ * This structure holds the attachment information between the dma_buf buffer
+ * and its user device(s). The list contains one attachment struct per device
+ * attached to the buffer.
+ */
+struct dma_buf_attachment {
+ struct dma_buf *dmabuf;
+ struct device *dev;
+ struct list_head node;
+ void *priv;
+};
+
+/**
+ * get_dma_buf - convenience wrapper for get_file.
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Increments the reference count on the dma-buf, needed in case of drivers
+ * that need to create additional references to the dmabuf on the
+ * kernel side. For example, an exporter that needs to keep a dmabuf ptr
+ * so that subsequent exports don't create a new dmabuf.
+ */
+static inline void get_dma_buf(struct dma_buf *dmabuf)
+{
+ get_file(dmabuf->file);
+}
+
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev);
+void dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *dmabuf_attach);
+
+struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
+ size_t size, int flags, const char *);
+
+#define dma_buf_export(priv, ops, size, flags) \
+ dma_buf_export_named(priv, ops, size, flags, __FILE__)
+
+int dma_buf_fd(struct dma_buf *dmabuf, int flags);
+struct dma_buf *dma_buf_get(int fd);
+void dma_buf_put(struct dma_buf *dmabuf);
+
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
+ enum dma_data_direction);
+void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
+ enum dma_data_direction);
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+ enum dma_data_direction dir);
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+ enum dma_data_direction dir);
+void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+void *dma_buf_kmap(struct dma_buf *, unsigned long);
+void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_debugfs_create_file(const char *name,
+ int (*write)(struct seq_file *));
+#endif /* __DMA_BUF_H__ */
diff -urN linux-3.0.101/include/linux/dma-contiguous.h linux-3.0.101.xm510/include/linux/dma-contiguous.h
--- linux-3.0.101/include/linux/dma-contiguous.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/dma-contiguous.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ * The Contiguous Memory Allocator (CMA) makes it possible to
+ * allocate big contiguous chunks of memory after the system has
+ * booted.
+ *
+ * Why is it needed?
+ *
+ * Various devices on embedded systems have no scatter-gather and/or
+ * IO map support and require contiguous blocks of memory to
+ * operate. They include devices such as cameras, hardware video
+ * coders, etc.
+ *
+ * Such devices often require big memory buffers (a full HD frame
+ * is, for instance, more than 2 megapixels, i.e. more than 6
+ * MB of memory), which makes mechanisms such as kmalloc() or
+ * alloc_page() ineffective.
+ *
+ * At the same time, a solution where a big memory region is
+ * reserved for a device is suboptimal since often more memory is
+ * reserved than strictly required and, moreover, the memory is
+ * inaccessible to page system even if device drivers don't use it.
+ *
+ * CMA tries to solve this issue by operating on memory regions
+ * where only movable pages can be allocated from. This way, kernel
+ * can use the memory for pagecache and when device driver requests
+ * it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ * CMA should not be used by the device drivers directly. It is
+ * only a helper framework for dma-mapping subsystem.
+ *
+ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least global CMA area and a few optional device
+ * private areas configured in kernel .config.
+ */
+#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+
+#else
+
+#define MAX_CMA_AREAS (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order)
+{
+ return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ return false;
+}
+
+#endif
+
+#endif
+
+#endif
diff -urN linux-3.0.101/include/linux/dma-direction.h linux-3.0.101.xm510/include/linux/dma-direction.h
--- linux-3.0.101/include/linux/dma-direction.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/dma-direction.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,13 @@
+#ifndef _LINUX_DMA_DIRECTION_H
+#define _LINUX_DMA_DIRECTION_H
+/*
+ * These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts.
+ */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+#endif
diff -urN linux-3.0.101/include/linux/jiffies.h linux-3.0.101.xm510/include/linux/jiffies.h
--- linux-3.0.101/include/linux/jiffies.h 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/jiffies.h 2016-05-17 09:52:17.000000000 +0300
@@ -164,7 +164,9 @@
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+/*#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))*/
+/* Start at 0 instead, for compatibility with user-space applications */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (0))

/*
* Change timeval to jiffies, trying to avoid the
diff -urN linux-3.0.101/include/linux/mmc/arasan_plat.h linux-3.0.101.xm510/include/linux/mmc/arasan_plat.h
--- linux-3.0.101/include/linux/mmc/arasan_plat.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/mmc/arasan_plat.h 2017-09-11 14:47:37.000000000 +0300
@@ -0,0 +1,39 @@
+/*
+ *
+ * include/linux/mmc/arasan_plat.h
+ *
+ * platform data for the Arasan MMC/SD/SDIO HC driver
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ */
+
+#ifndef __ARASAN_PLAT_H__
+#define __ARASAN_PLAT_H__
+
+typedef void (*func_t)(void);
+struct arasan_platform_data {
+ unsigned int need_poll;
+ unsigned int need_detect;
+ unsigned int use_pio;
+
+ unsigned int card_irq; /* SD cards cannot handle the card interrupt */
+ unsigned int auto_cmd12; /* For multi-block transfers, SDIO must not
+ auto-send CMD12 while SD requires it; if
+ CMD12 is auto-sent on SDIO, the Wi-Fi module
+ does not respond and the request times out.
+ */
+ unsigned int sdio_4bit_data;
+
+ func_t p_powerup;
+
+};
+
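+/* Stub that always reports success; no platform resources are claimed here. */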
+static inline int arasan_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#endif
diff -urN linux-3.0.101/include/linux/of_dma.h linux-3.0.101.xm510/include/linux/of_dma.h
--- linux-3.0.101/include/linux/of_dma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/of_dma.h 2016-05-17 09:52:17.000000000 +0300
@@ -0,0 +1,71 @@
+/*
+ * OF helpers for DMA request / controller
+ *
+ * Based on of_gpio.h
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OF_DMA_H
+#define __LINUX_OF_DMA_H
+
+#include <linux/of.h>
+#include <linux/dmaengine.h>
+
+struct device_node;
+
+struct of_dma {
+ struct list_head of_dma_controllers;
+ struct device_node *of_node;
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *);
+ void *of_dma_data;
+};
+
+struct of_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
+#ifdef CONFIG_OF
+extern int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data);
+extern void of_dma_controller_free(struct device_node *np);
+extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ const char *name);
+extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
+#else
+static inline int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+
+static inline void of_dma_controller_free(struct device_node *np)
+{
+}
+
+static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __LINUX_OF_DMA_H */
diff -urN linux-3.0.101/include/linux/stmmac.h linux-3.0.101.xm510/include/linux/stmmac.h
--- linux-3.0.101/include/linux/stmmac.h 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/include/linux/stmmac.h 2016-05-17 09:52:17.000000000 +0300
@@ -30,7 +30,6 @@

/* platform data for platform device structure's platform_data field */

-/* Private data for the STM on-board ethernet driver */
struct plat_stmmacenet_data {
int bus_id;
int pbl;
diff -urN linux-3.0.101/include/net/cfg80211.h linux-3.0.101.xm510/include/net/cfg80211.h
--- linux-3.0.101/include/net/cfg80211.h 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/include/net/cfg80211.h 2016-05-17 09:52:17.000000000 +0300
@@ -426,6 +426,7 @@
* @STATION_INFO_RX_BITRATE: @rxrate fields are filled
* @STATION_INFO_BSS_PARAM: @bss_param filled
* @STATION_INFO_CONNECTED_TIME: @connected_time filled
+ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
*/
enum station_info_flags {
STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -444,7 +445,8 @@
STATION_INFO_SIGNAL_AVG = 1<<13,
STATION_INFO_RX_BITRATE = 1<<14,
STATION_INFO_BSS_PARAM = 1<<15,
- STATION_INFO_CONNECTED_TIME = 1<<16
+ STATION_INFO_CONNECTED_TIME = 1<<16,
+ STATION_INFO_ASSOC_REQ_IES = 1<<17
};

/**
@@ -536,6 +538,11 @@
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ * This is used only when in AP mode with drivers that do not use
+ * user space MLME/SME implementation. The information is provided for
+ * the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
*/
struct station_info {
u32 filled;
@@ -558,6 +565,9 @@
struct sta_bss_parameters bss_param;

int generation;
+
+ const u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
};

/**
diff -urN linux-3.0.101/init/main.c linux-3.0.101.xm510/init/main.c
--- linux-3.0.101/init/main.c 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/init/main.c 2016-05-17 09:52:17.000000000 +0300
@@ -454,6 +454,39 @@
vmalloc_init();
}

+
+
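+/* /proc/xm/xminfo dumps the XM boot-tag block: hardware ID, MAC
+ * address, the xmauto/xmuart flags and the product ID string.
+ */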
+static int xminfo_read_proc(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+{
+ extern struct tag_xminfo xminfo;
+ int len = 0;
+
+ len += sprintf(buf + len, "######XMINFO######\n");
+ len += sprintf(buf + len, "DATE:%s\n",__DATE__);
+ len += sprintf(buf + len, "HWID:%s\n", xminfo.hwid);
+ len += sprintf(buf + len, "ethaddr:%s\n", xminfo.ethaddr);
+ len += sprintf(buf + len, "xmauto:%d\n", xminfo.xmauto);
+ len += sprintf(buf + len, "xmuart:%d\n", xminfo.xmuart);
+ len += sprintf(buf + len, "ID:%s\n", xminfo.p_id);
+ len += sprintf(buf + len, "##################\n");
+ return len;
+}
+
+static int __init proc_xminfo_create(void)
+{
+ struct proc_dir_entry *pxm_proc = NULL;
+ struct proc_dir_entry *pfile = NULL;
+ pxm_proc = create_proc_entry("xm", S_IFDIR|S_IRUGO|S_IXUGO, NULL);
+ pfile = create_proc_read_entry("xminfo", 0,pxm_proc,xminfo_read_proc, NULL);
+ if (NULL == pfile){
+ printk("create xminfo proc failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+
+
asmlinkage void __init start_kernel(void)
{
char * command_line;
@@ -610,6 +643,7 @@
page_writeback_init();
#ifdef CONFIG_PROC_FS
proc_root_init();
+ proc_xminfo_create();
#endif
cgroup_init();
cpuset_init();
diff -urN linux-3.0.101/kernel/printk.c linux-3.0.101.xm510/kernel/printk.c
--- linux-3.0.101/kernel/printk.c 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/kernel/printk.c 2016-05-17 09:52:17.000000000 +0300
@@ -43,7 +43,7 @@
#include <linux/rculist.h>

#include <asm/uaccess.h>
-
+#include <asm/setup.h>
/*
* Architectures can override it:
*/
@@ -1020,12 +1020,25 @@
/*
* Set up a list of consoles. Called from init/main.c
*/
+extern struct tag_xminfo xminfo;
static int __init console_setup(char *str)
{
char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
char *s, *options, *brl_options = NULL;
int idx;

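+ /* If the boot tags set xmuart, register the "null" console so no
+  * kernel messages go out on the serial port, and skip normal
+  * console= parsing.
+  */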
+ if (xminfo.xmuart == 1)
+ {
+ strcpy(buf, "null");
+ idx = 0;
+ options = NULL;
+ __add_preferred_console(buf, idx, options, brl_options);
+ console_set_on_cmdline = 1;
+ return 1;
+ }
+
+
+
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
if (!memcmp(str, "brl,", 4)) {
brl_options = "";
diff -urN linux-3.0.101/Makefile linux-3.0.101.xm510/Makefile
--- linux-3.0.101/Makefile 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/Makefile 2016-05-17 09:52:17.000000000 +0300
@@ -1,3 +1,5 @@
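+# Hardwire the target arch and cross toolchain so a bare "make" builds
+# the XM510 kernel.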
+ARCH=arm
+CROSS_COMPILE=arm-linux-
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 101
diff -urN linux-3.0.101/net/mac80211/main.c linux-3.0.101.xm510/net/mac80211/main.c
--- linux-3.0.101/net/mac80211/main.c 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/net/mac80211/main.c 2017-09-11 14:47:37.000000000 +0300
@@ -931,6 +931,14 @@
"Failed to add default virtual iface\n");
}

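+ /* XM addition: bring up a second default STA interface named "eth2"
+  * alongside the stock default interface.
+  */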
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
+ result = ieee80211_if_add(local, "eth2", NULL,
+ NL80211_IFTYPE_STATION, NULL);
+ if (result)
+ wiphy_warn(local->hw.wiphy,
+ "Failed to add default virtual iface\n");
+ }
+
rtnl_unlock();

local->network_latency_notifier.notifier_call =
diff -urN linux-3.0.101/net/wireless/mlme.c linux-3.0.101.xm510/net/wireless/mlme.c
--- linux-3.0.101/net/wireless/mlme.c 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/net/wireless/mlme.c 2016-07-14 08:38:29.000000000 +0300
@@ -115,7 +115,7 @@
goto out;
}

- if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
+ if (!wdev->conn && wdev->sme_state != CFG80211_SME_CONNECTING) {
/*
* This is for the userspace SME, the CONNECTING
* state will be changed to CONNECTED by
diff -urN linux-3.0.101/net/wireless/nl80211.c linux-3.0.101.xm510/net/wireless/nl80211.c
--- linux-3.0.101/net/wireless/nl80211.c 2013-10-22 10:58:59.000000000 +0300
+++ linux-3.0.101.xm510/net/wireless/nl80211.c 2016-05-17 09:52:17.000000000 +0300
@@ -2212,6 +2212,9 @@
nla_nest_end(msg, bss_param);
}
nla_nest_end(msg, sinfoattr);
+ if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+ NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+ sinfo->assoc_req_ies);

return genlmsg_end(msg, hdr);