--- linux-4.9.37/drivers/dma/edmac_goke.c	1970-01-01 03:00:00.000000000 +0300
+++ linux-4.9.y/drivers/dma/edmac_goke.c	2021-06-07 13:01:33.000000000 +0300
@@ -0,0 +1,1286 @@
+/*
+ * Copyright (c) Hunan Goke, Chengdu Goke, Shandong Goke. 2021. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "edmac_goke.h"
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DRIVER_NAME "edmac-goke"
+
+int edmac_trace_level = EDMAC_TRACE_LEVEL;
+
+typedef struct edmac_lli {
+ u64 next_lli;
+ u32 reserved[5];
+ u32 count;
+ u64 src_addr;
+ u64 dest_addr;
+ u32 config;
+ u32 pad[3];
+} edmac_lli;
+
+struct edmac_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+struct transfer_desc {
+ struct virt_dma_desc virt_desc;
+
+ dma_addr_t llis_busaddr;
+ u64 *llis_vaddr;
+ u32 ccfg;
+ size_t size;
+ bool done;
+ bool cyclic;
+};
+
+enum edmac_dma_chan_state {
+ EDMAC_CHAN_IDLE,
+ EDMAC_CHAN_RUNNING,
+ EDMAC_CHAN_PAUSED,
+ EDMAC_CHAN_WAITING,
+};
+
+struct edmac_dma_chan {
+ bool slave;
+ int signal;
+ int id;
+ struct virt_dma_chan virt_chan;
+ struct edmac_phy_chan *phychan;
+ struct dma_slave_config cfg;
+ struct transfer_desc *at;
+ struct edmac_driver_data *host;
+ enum edmac_dma_chan_state state;
+};
+
+struct edmac_phy_chan {
+ unsigned int id;
+ void __iomem *base;
+ spinlock_t lock;
+ struct edmac_dma_chan *serving;
+};
+
+struct edmac_driver_data {
+ struct platform_device *dev;
+ struct dma_device slave;
+ struct dma_device memcpy;
+ void __iomem *base;
+ struct regmap *misc_regmap;
+ void __iomem *crg_ctrl;
+ struct edmac_phy_chan *phy_chans;
+ struct dma_pool *pool;
+ unsigned int misc_ctrl_base;
+ int irq;
+ unsigned int id;
+ struct clk *clk;
+ struct clk *axi_clk;
+ struct reset_control *rstc;
+ unsigned int channels;
+ unsigned int slave_requests;
+ unsigned int max_transfer_size;
+};
+
+#ifdef DEBUG_EDMAC
+void dump_lli(u64 *llis_vaddr, unsigned int num)
+{
+ edmac_lli *plli = (edmac_lli *)llis_vaddr;
+ unsigned int i;
+
+ edmac_trace(3, "lli num = %d\n", num);
+ for (i = 0; i < num; i++) {
+ printk("lli%d:lli_L: 0x%llx\n", i, plli[i].next_lli & 0xffffffff);
+ printk("lli%d:lli_H: 0x%llx\n", i, (plli[i].next_lli >> 32) & 0xffffffff);
+ printk("lli%d:count: 0x%x\n", i, plli[i].count);
+ printk("lli%d:src_addr_L: 0x%llx\n", i, plli[i].src_addr & 0xffffffff);
+ printk("lli%d:src_addr_H: 0x%llx\n", i, (plli[i].src_addr >> 32) & 0xffffffff);
+ printk("lli%d:dst_addr_L: 0x%llx\n", i, plli[i].dest_addr & 0xffffffff);
+ printk("lli%d:dst_addr_H: 0x%llx\n", i, (plli[i].dest_addr >> 32) & 0xffffffff);
+ printk("lli%d:CONFIG: 0x%x\n", i, plli[i].config);
+ }
+}
+
+#else
+void dump_lli(u64 *llis_vaddr, unsigned int num)
+{
+}
+#endif
+
+static inline struct edmac_dma_chan *to_edamc_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct edmac_dma_chan, virt_chan.chan);
+}
+
+static inline struct transfer_desc *to_edmac_transfer_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct transfer_desc, virt_desc.tx);
+}
+
+static struct dma_chan *edmac_find_chan_id(struct edmac_driver_data *edmac,
+ int request_num)
+{
+ struct edmac_dma_chan *edmac_dma_chan = NULL;
+
+ list_for_each_entry(edmac_dma_chan, &edmac->slave.channels, virt_chan.chan.device_node) {
+ if (edmac_dma_chan->id == request_num) {
+ return &edmac_dma_chan->virt_chan.chan;
+ }
+ }
+ return NULL;
+}
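+
+/*
+ * Request mux layout (see edma_of_xlate() below): each 32-bit misc_ctrl
+ * word packs four 6-bit signal fields, one per byte. The peripheral signal
+ * for request line N is written into byte (N & 0x3) of word (N & ~0x3).
+ */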
+static struct dma_chan *edma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct edmac_driver_data *edmac = ofdma->of_dma_data;
+ struct edmac_dma_chan *edmac_dma_chan = NULL;
+ struct dma_chan *dma_chan = NULL;
+ struct regmap *misc = NULL;
+ unsigned int signal = 0, request_num = 0;
+ unsigned int reg = 0, offset = 0;
+
+ if (!edmac) {
+ return NULL;
+ }
+
+ misc = edmac->misc_regmap;
+
+ if (dma_spec->args_count != 2) {
+ edmac_error("invalid dma args count!\n");
+ return NULL;
+ }
+
+ request_num = dma_spec->args[0];
+ signal = dma_spec->args[1];
+
+ edmac_trace(3, "host->id = %d, signal = %d, request_num = %d\n", edmac->id, signal, request_num);
+
+ if (misc != NULL) {
+#ifdef CONFIG_ACCESS_M7_DEV
+ offset = edmac->misc_ctrl_base;
+ reg = 0xc0;
+ regmap_write(misc, offset, reg);
+#else
+ offset = edmac->misc_ctrl_base + (request_num & (~0x3));
+ regmap_read(misc, offset, &reg);
+ reg &= ~(0x3f << ((request_num & 0x3) << 3));
+ reg |= signal << ((request_num & 0x3) << 3);
+ regmap_write(misc, offset, reg);
+#endif
+ }
+
+ edmac_trace(3, "offset = 0x%x, reg = 0x%x\n", offset, reg);
+
+ dma_chan = edmac_find_chan_id(edmac, request_num);
+ if (!dma_chan) {
+ edmac_error("DMA slave channel is not found!\n");
+ return NULL;
+ }
+
+ edmac_dma_chan = to_edamc_chan(dma_chan);
+ edmac_dma_chan->signal = request_num;
+
+ return dma_get_slave_channel(dma_chan);
+}
+
+static int get_of_probe(struct edmac_driver_data *edmac)
+{
+ struct resource *res = NULL;
+ struct platform_device *platdev = edmac->dev;
+ struct device_node *np = platdev->dev.of_node;
+ int ret;
+
+ ret = of_property_read_u32((&platdev->dev)->of_node,
+ "devid", &(edmac->id));
+ if (ret) {
+ edmac_error("get edmac id fail\n");
+ return -ENODEV;
+ }
+
+ edmac->clk = devm_clk_get(&(platdev->dev), "apb_pclk");
+ if (IS_ERR(edmac->clk)) {
+ return PTR_ERR(edmac->clk);
+ }
+
+ edmac->axi_clk = devm_clk_get(&(platdev->dev), "axi_aclk");
+ if (IS_ERR(edmac->axi_clk)) {
+ return PTR_ERR(edmac->axi_clk);
+ }
+
+ edmac->rstc = devm_reset_control_get(&(platdev->dev), "dma-reset");
+ if (IS_ERR(edmac->rstc)) {
+ return PTR_ERR(edmac->rstc);
+ }
+
+ res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ edmac_error("no reg resource\n");
+ return -ENODEV;
+ }
+
+ edmac->base = devm_ioremap_resource(&(platdev->dev), res);
+ if (IS_ERR(edmac->base)) {
+ return PTR_ERR(edmac->base);
+ }
+#if defined(CONFIG_ARCH_GK7205V200) || defined(CONFIG_ARCH_GK7205V300) || \
+ defined(CONFIG_ARCH_GK7202V300) || defined(CONFIG_ARCH_GK7605V100)
+ edmac->misc_regmap = NULL;
+ (void)np;
+#else
+ edmac->misc_regmap = syscon_regmap_lookup_by_phandle(np, "misc_regmap");
+ if (IS_ERR(edmac->misc_regmap)) {
+ return PTR_ERR(edmac->misc_regmap);
+ }
+
+ ret = of_property_read_u32((&platdev->dev)->of_node,
+ "misc_ctrl_base", &(edmac->misc_ctrl_base));
+ if (ret) {
+ edmac_error("get dma-misc_ctrl_base fail\n");
+ return -ENODEV;
+ }
+#endif
+ edmac->irq = platform_get_irq(platdev, 0);
+ if (unlikely(edmac->irq < 0)) {
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32((&platdev->dev)->of_node,
+ "dma-channels", &(edmac->channels));
+ if (ret) {
+ edmac_error("get dma-channels fail\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32((&platdev->dev)->of_node,
+ "dma-requests", &(edmac->slave_requests));
+ if (ret) {
+ edmac_error("get dma-requests fail\n");
+ return -ENODEV;
+ }
+ edmac_trace(2, "dma-channels = %d, dma-requests = %d\n",
+ edmac->channels, edmac->slave_requests);
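+
+ /*
+ * Consumers reference this controller with two cells; the values and
+ * the "tx" name below are illustrative only, not part of this driver:
+ *
+ *     dmas = <&edmac 10 2>;   (cell 0: request line, cell 1: peripheral signal)
+ *     dma-names = "tx";
+ */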
+ return of_dma_controller_register(platdev->dev.of_node, edma_of_xlate, edmac);
+}
+
+static void edmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static enum dma_status edmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ enum dma_status ret = DMA_COMPLETE;
+ struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan);
+ struct edmac_phy_chan *phychan = edmac_dma_chan->phychan;
+ struct edmac_driver_data *edmac = edmac_dma_chan->host;
+ struct virt_dma_desc *vd = NULL;
+ struct transfer_desc *tsf_desc = NULL;
+ unsigned long flags;
+ size_t bytes = 0;
+ u64 curr_lli = 0, curr_residue_bytes = 0, temp = 0;
+ edmac_lli *plli = NULL;
+ unsigned int i = 0, index = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE) {
+ return ret;
+ }
+
+ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags);
+ vd = vchan_find_desc(&edmac_dma_chan->virt_chan, cookie);
+ if (vd) {
+ /* not yet transferred */
+ tsf_desc = to_edmac_transfer_desc(&vd->tx);
+ bytes = tsf_desc->size;
+ } else {
+ /* transferring */
+ tsf_desc = edmac_dma_chan->at;
+
+ if (!phychan || !tsf_desc) {
+ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags);
+ goto out;
+ }
+ curr_lli = (edmac_readl(edmac->base + EDMAC_Cx_LLI_L(phychan->id)) & (~(EDMAC_LLI_ALIGN - 1)));
+ curr_lli |= ((u64)(edmac_readl(edmac->base + EDMAC_Cx_LLI_H(phychan->id)) & 0xffffffff) << 32);
+ curr_residue_bytes = edmac_readl(edmac->base + EDMAC_Cx_CURR_CNT0(phychan->id));
+ if (curr_lli == 0) {
+ /* It means non-lli mode */
+ bytes = curr_residue_bytes;
+ } else {
+ /* It means lli mode */
+ index = (curr_lli - tsf_desc->llis_busaddr) / sizeof(edmac_lli) - 1;
+ plli = (edmac_lli *)(tsf_desc->llis_vaddr);
+ for (i = 0; i < index; i++) {
+ temp += plli[i].count;
+ }
+ temp += plli[i].count - curr_residue_bytes;
+ bytes = tsf_desc->size - temp;
+ }
+ }
+ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags);
+
+ dma_set_residue(txstate, bytes);
+
+ if (edmac_dma_chan->state == EDMAC_CHAN_PAUSED && ret == DMA_IN_PROGRESS) {
+ ret = DMA_PAUSED;
+ return ret;
+ }
+
+out:
+ return ret;
+}
+
+static struct edmac_phy_chan *edmac_get_phy_channel(
+ struct edmac_driver_data *edmac,
+ struct edmac_dma_chan *edmac_dma_chan)
+{
+ struct edmac_phy_chan *ch = NULL;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < edmac->channels; i++) {
+ ch = &edmac->phy_chans[i];
+
+ spin_lock_irqsave(&ch->lock, flags);
+
+ if (!ch->serving) {
+ ch->serving = edmac_dma_chan;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ if (i == edmac->channels) {
+ return NULL;
+ }
+
+ return ch;
+}
+
+static void edmac_write_lli(struct edmac_driver_data *edmac,
+ struct edmac_phy_chan *phychan,
+ struct transfer_desc *tsf_desc)
+{
+ edmac_lli *plli = (edmac_lli *)tsf_desc->llis_vaddr;
+
+ if (plli->next_lli != 0x0) {
+ edmac_writel((plli->next_lli & 0xffffffff) | EDMAC_LLI_ENABLE, edmac->base + EDMAC_Cx_LLI_L(phychan->id));
+ } else {
+ edmac_writel((plli->next_lli & 0xffffffff), edmac->base + EDMAC_Cx_LLI_L(phychan->id));
+ }
+
+ edmac_writel(((plli->next_lli >> 32) & 0xffffffff), edmac->base + EDMAC_Cx_LLI_H(phychan->id));
+ edmac_writel(plli->count, edmac->base + EDMAC_Cx_CNT0(phychan->id));
+ edmac_writel(plli->src_addr & 0xffffffff, edmac->base + EDMAC_Cx_SRC_ADDR_L(phychan->id));
+ edmac_writel((plli->src_addr >> 32) & 0xffffffff, edmac->base + 
EDMAC_Cx_SRC_ADDR_H(phychan->id)); + edmac_writel(plli->dest_addr & 0xffffffff, edmac->base + EDMAC_Cx_DEST_ADDR_L(phychan->id)); + edmac_writel((plli->dest_addr >> 32) & 0xffffffff, edmac->base + EDMAC_Cx_DEST_ADDR_H(phychan->id)); + edmac_writel(plli->config, edmac->base + EDMAC_Cx_CONFIG(phychan->id)); +} + +static void edmac_start_next_txd(struct edmac_dma_chan *edmac_dma_chan) +{ + struct edmac_driver_data *edmac = edmac_dma_chan->host; + struct edmac_phy_chan *phychan = edmac_dma_chan->phychan; + struct virt_dma_desc *vd = vchan_next_desc(&edmac_dma_chan->virt_chan); + struct transfer_desc *tsf_desc = to_edmac_transfer_desc(&vd->tx); + unsigned int val = 0; + + list_del(&tsf_desc->virt_desc.node); + + edmac_dma_chan->at = tsf_desc; + + edmac_write_lli(edmac, phychan, tsf_desc); + + val = edmac_readl(edmac->base + EDMAC_Cx_CONFIG(phychan->id)); + + edmac_trace(2, " EDMAC_Cx_CONFIG = 0x%x\n", val); + edmac_writel(val | EDMAC_CxCONFIG_LLI_START, edmac->base + EDMAC_Cx_CONFIG(phychan->id)); +} + +static void edmac_start(struct edmac_dma_chan * edmac_dma_chan) +{ + struct edmac_driver_data *edmac = edmac_dma_chan->host; + struct edmac_phy_chan *ch; + + ch = edmac_get_phy_channel(edmac, edmac_dma_chan); + if (!ch) { + edmac_error("no phy channel available !\n"); + edmac_dma_chan->state = EDMAC_CHAN_WAITING; + return; + } + + edmac_dma_chan->phychan = ch; + edmac_dma_chan->state = EDMAC_CHAN_RUNNING; + + edmac_start_next_txd(edmac_dma_chan); +} + +static void edmac_issue_pending(struct dma_chan *chan) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); + if (vchan_issue_pending(&edmac_dma_chan->virt_chan)) { + if (!edmac_dma_chan->phychan && edmac_dma_chan->state != EDMAC_CHAN_WAITING) { + edmac_start(edmac_dma_chan); + } + } + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); +} + +static void edmac_free_txd_list(struct edmac_dma_chan *edmac_dma_chan) +{ + LIST_HEAD(head); + + vchan_get_all_descriptors(&edmac_dma_chan->virt_chan, &head); + vchan_dma_desc_free_list(&edmac_dma_chan->virt_chan, &head); +} + +static int edmac_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + + if (!edmac_dma_chan->slave) { + edmac_error("slave is null!"); + return -EINVAL; + } + + edmac_dma_chan->cfg = *config; + + return 0; +} + +static void edmac_pause_phy_chan(struct edmac_dma_chan *edmac_dma_chan) +{ + struct edmac_driver_data *edmac = edmac_dma_chan->host; + struct edmac_phy_chan *phychan = edmac_dma_chan->phychan; + unsigned int val; + int timeout; + + + val = edmac_readl(edmac->base + EDMAC_Cx_CONFIG(phychan->id)); + val &= ~CCFG_EN; + edmac_writel(val, edmac->base + EDMAC_Cx_CONFIG(phychan->id)); + + /* Wait for channel inactive */ + for (timeout = 2000; timeout > 0; timeout--) { + if (!(0x1 << phychan->id & edmac_readl(edmac->base + EDMAC_CH_STAT))) { + break; + } + edmac_writel(val, edmac->base + EDMAC_Cx_CONFIG(phychan->id)); + udelay(1); + } + + if (timeout == 0) { + edmac_error(":channel%u timeout waiting for pause, timeout:%d\n", + phychan->id, timeout); + } +} + +static int edmac_pause(struct dma_chan *chan) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); + + if (!edmac_dma_chan->phychan) { + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + return 0; + } + + 
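+ /*
+ * edmac_pause_phy_chan() clears CCFG_EN, which only requests a stop;
+ * the in-flight burst may still complete, so it polls EDMAC_CH_STAT
+ * (up to ~2ms) before the channel is treated as paused.
+ */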
edmac_pause_phy_chan(edmac_dma_chan); + edmac_dma_chan->state = EDMAC_CHAN_PAUSED; + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + + return 0; +} + +static void edmac_resume_phy_chan(struct edmac_dma_chan *edmac_dma_chan) +{ + struct edmac_driver_data *edmac = edmac_dma_chan->host; + struct edmac_phy_chan *phychan = edmac_dma_chan->phychan; + unsigned int val; + + val = edmac_readl(edmac->base + EDMAC_Cx_CONFIG(phychan->id)); + val |= CCFG_EN; + edmac_writel(val, edmac->base + EDMAC_Cx_CONFIG(phychan->id)); +} + +static int edmac_resume(struct dma_chan *chan) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); + + if (!edmac_dma_chan->phychan) { + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + return 0; + } + + edmac_resume_phy_chan(edmac_dma_chan); + edmac_dma_chan->state = EDMAC_CHAN_RUNNING; + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + + return 0; +} + +void edmac_phy_free(struct edmac_dma_chan *chan); +static void edmac_desc_free(struct virt_dma_desc *vd); +static int edmac_terminate_all(struct dma_chan *chan) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); + if (!edmac_dma_chan->phychan && !edmac_dma_chan->at) { + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + return 0; + } + + edmac_dma_chan->state = EDMAC_CHAN_IDLE; + + if (edmac_dma_chan->phychan) { + edmac_phy_free(edmac_dma_chan); + } + + if (edmac_dma_chan->at) { + edmac_desc_free(&edmac_dma_chan->at->virt_desc); + edmac_dma_chan->at = NULL; + } + edmac_free_txd_list(edmac_dma_chan); + + spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); + + return 0; +} + +static struct transfer_desc *edmac_get_tsf_desc(struct edmac_driver_data *plchan) +{ + struct transfer_desc *tsf_desc = kzalloc(sizeof(struct transfer_desc), GFP_NOWAIT); + + if (tsf_desc) { + tsf_desc->ccfg = 0; + } + + return tsf_desc; +} + +static void edmac_free_tsf_desc(struct edmac_driver_data *edmac, + struct transfer_desc *tsf_desc) +{ + if (tsf_desc->llis_vaddr) { + dma_pool_free(edmac->pool, tsf_desc->llis_vaddr, tsf_desc->llis_busaddr); + } + + kfree(tsf_desc); +} + +static u32 get_width(enum dma_slave_buswidth width) +{ + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + return EDMAC_WIDTH_8BIT; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return EDMAC_WIDTH_16BIT; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return EDMAC_WIDTH_32BIT; + case DMA_SLAVE_BUSWIDTH_8_BYTES: + return EDMAC_WIDTH_64BIT; + default: + edmac_error("check here, width warning!\n"); + return ~0; + } +} + +struct transfer_desc *edmac_init_tsf_desc ( + struct dma_chan *chan, + enum dma_transfer_direction direction, + dma_addr_t *slave_addr) +{ + struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan); + struct edmac_driver_data *edmac = edmac_dma_chan->host; + struct transfer_desc *tsf_desc; + unsigned int config = 0, burst = 0; + unsigned int addr_width = 0, maxburst = 0; + unsigned int width = 0; + + tsf_desc = edmac_get_tsf_desc(edmac); + if (!tsf_desc) { + edmac_error("get tsf desc fail!\n"); + return NULL; + } + + if (direction == DMA_MEM_TO_DEV) { + config = EDMAC_CONFIG_SRC_INC; + *slave_addr = edmac_dma_chan->cfg.dst_addr; + addr_width = edmac_dma_chan->cfg.dst_addr_width; + maxburst = edmac_dma_chan->cfg.dst_maxburst; + } else if (direction == DMA_DEV_TO_MEM) { + config = EDMAC_CONFIG_DST_INC; + 
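+ /*
+ * dev-to-mem: only the memory side (the destination) increments; the
+ * source stays fixed at the peripheral FIFO address supplied through
+ * dmaengine_slave_config().
+ */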
*slave_addr = edmac_dma_chan->cfg.src_addr;
+ addr_width = edmac_dma_chan->cfg.src_addr_width;
+ maxburst = edmac_dma_chan->cfg.src_maxburst;
+ } else {
+ edmac_free_tsf_desc(edmac, tsf_desc);
+ edmac_error("direction unsupported!\n");
+ return NULL;
+ }
+
+ edmac_trace(3, "addr_width = 0x%x\n", addr_width);
+ width = get_width(addr_width);
+ edmac_trace(3, "width = 0x%x\n", width);
+ config |= width << EDMAC_CONFIG_SRC_WIDTH_SHIFT;
+ config |= width << EDMAC_CONFIG_DST_WIDTH_SHIFT;
+ edmac_trace(2, "tsf_desc->ccfg = 0x%x\n", config);
+
+ edmac_trace(3, "maxburst = 0x%x\n", maxburst);
+ if (maxburst > (EDMAC_MAX_BURST_WIDTH)) {
+ burst |= (EDMAC_MAX_BURST_WIDTH - 1);
+ } else if (maxburst == 0) {
+ burst |= EDMAC_MIN_BURST_WIDTH;
+ } else {
+ burst |= (maxburst - 1);
+ }
+ edmac_trace(3, "burst = 0x%x\n", burst);
+ config |= burst << EDMAC_CONFIG_SRC_BURST_SHIFT;
+ config |= burst << EDMAC_CONFIG_DST_BURST_SHIFT;
+
+ if (edmac_dma_chan->signal >= 0) {
+ edmac_trace(2, "edmac_dma_chan->signal = %d\n", edmac_dma_chan->signal);
+ config |= (unsigned int)edmac_dma_chan->signal << EDMAC_CXCONFIG_SIGNAL_SHIFT;
+ }
+
+ config |= EDMAC_CXCONFIG_DEV_MEM_TYPE << EDMAC_CXCONFIG_TSF_TYPE_SHIFT;
+ tsf_desc->ccfg = config;
+ edmac_trace(2, "tsf_desc->ccfg = 0x%x\n", tsf_desc->ccfg);
+
+ return tsf_desc;
+}
+
+static void edmac_fill_desc(struct transfer_desc *tsf_desc, dma_addr_t src,
+ dma_addr_t dst, unsigned int length, unsigned int num)
+{
+ edmac_lli *plli;
+
+ plli = (edmac_lli *)(tsf_desc->llis_vaddr);
+ memset(&plli[num], 0x0, sizeof(edmac_lli));
+
+ plli[num].src_addr = src;
+ plli[num].dest_addr = dst;
+ plli[num].config = tsf_desc->ccfg;
+ plli[num].count = length;
+ tsf_desc->size += length;
+
+ if (num > 0) {
+ plli[num - 1].next_lli = (tsf_desc->llis_busaddr + (num) * sizeof(edmac_lli)) & (~(EDMAC_LLI_ALIGN - 1));
+ plli[num - 1].next_lli |= EDMAC_LLI_ENABLE;
+ }
+}
+
+static struct dma_async_tx_descriptor *edmac_perp_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan);
+ struct edmac_driver_data *edmac = edmac_dma_chan->host;
+ struct transfer_desc *tsf_desc = NULL;
+ struct scatterlist *sg = NULL;
+ int tmp = 0;
+ dma_addr_t src = 0, dst = 0, addr = 0, slave_addr = 0;
+ unsigned int length = 0, num = 0;
+ edmac_lli *last_plli = NULL;
+
+ if (sgl == NULL) {
+ edmac_error("sgl is null!\n");
+ return NULL;
+ }
+
+ tsf_desc = edmac_init_tsf_desc(chan, direction, &slave_addr);
+ if (!tsf_desc) {
+ edmac_error("desc init fail\n");
+ return NULL;
+ }
+
+ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, &tsf_desc->llis_busaddr);
+ if (!tsf_desc->llis_vaddr) {
+ edmac_free_tsf_desc(edmac, tsf_desc);
+ edmac_error("failed to allocate LLI memory from pool!\n");
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ addr = sg_dma_address(sg);
+ length = sg_dma_len(sg);
+ if (direction == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = slave_addr;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ src = slave_addr;
+ dst = addr;
+ }
+ edmac_fill_desc(tsf_desc, src, dst, length, num);
+ num++;
+ }
+
+ last_plli = (edmac_lli *)((unsigned long)tsf_desc->llis_vaddr + (num - 1) * sizeof(edmac_lli));
+ last_plli->next_lli |= EDMAC_LLI_DISABLE;
+ dump_lli(tsf_desc->llis_vaddr, num);
+
+ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags);
+}
+
+static struct dma_async_tx_descriptor *edmac_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan);
+ struct edmac_driver_data *edmac = edmac_dma_chan->host;
+ struct transfer_desc *tsf_desc = NULL;
+ u32 config = 0;
+ size_t num = 0;
+ size_t length = 0;
+ edmac_lli *last_plli = NULL;
+
+ if (!len) {
+ return NULL;
+ }
+
+ tsf_desc = edmac_get_tsf_desc(edmac);
+ if (!tsf_desc) {
+ edmac_error("get tsf desc fail!\n");
+ return NULL;
+ }
+
+ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, &tsf_desc->llis_busaddr);
+ if (!tsf_desc->llis_vaddr) {
+ edmac_free_tsf_desc(edmac, tsf_desc);
+ edmac_error("failed to allocate LLI memory from pool!\n");
+ return NULL;
+ }
+
+ config |= EDMAC_CONFIG_SRC_INC | EDMAC_CONFIG_DST_INC;
+ config |= EDMAC_CXCONFIG_MEM_TYPE << EDMAC_CXCONFIG_TSF_TYPE_SHIFT;
+
+ /* max burst width is 16, but the register field encodes it as 0xf */
+ config |= (EDMAC_MAX_BURST_WIDTH - 1) << EDMAC_CONFIG_SRC_BURST_SHIFT;
+ config |= (EDMAC_MAX_BURST_WIDTH - 1) << EDMAC_CONFIG_DST_BURST_SHIFT;
+
+ tsf_desc->ccfg = config;
+
+ do {
+ length = min_t(size_t, len, MAX_TRANSFER_BYTES);
+ edmac_fill_desc(tsf_desc, src, dest, length, num);
+
+ src += length;
+ dest += length;
+ len -= length;
+ num++;
+ } while (len);
+
+ last_plli = (edmac_lli *)((unsigned long)tsf_desc->llis_vaddr + (num - 1) * sizeof(edmac_lli));
+ last_plli->next_lli |= EDMAC_LLI_DISABLE;
+ dump_lli(tsf_desc->llis_vaddr, num);
+
+ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags);
+}
+
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct edmac_dma_chan *edmac_dma_chan = to_edamc_chan(chan);
+ struct edmac_driver_data *edmac = edmac_dma_chan->host;
+ struct transfer_desc *tsf_desc;
+ dma_addr_t src = 0, dst = 0, addr = 0, slave_addr = 0;
+ size_t length = 0, since = 0, total = 0, num = 0, len = 0;
+ edmac_lli *last_plli = NULL;
+ edmac_lli *plli = NULL;
+
+ tsf_desc = edmac_init_tsf_desc(chan, direction, &slave_addr);
+ if (!tsf_desc) {
+ edmac_error("desc init fail\n");
+ return NULL;
+ }
+
+ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, &tsf_desc->llis_busaddr);
+ if (!tsf_desc->llis_vaddr) {
+ edmac_free_tsf_desc(edmac, tsf_desc);
+ edmac_error("failed to allocate LLI memory from pool!\n");
+ return NULL;
+ }
+
+ tsf_desc->cyclic = true;
+ addr = buf_addr;
+ total = buf_len;
+
+ /* each LLI carries at most one period and at most MAX_TRANSFER_BYTES */
+ len = min_t(size_t, period_len, MAX_TRANSFER_BYTES);
+ do {
+ length = min_t(size_t, total, len);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = slave_addr;
+ } else if (direction == DMA_DEV_TO_MEM) {
+ src = slave_addr;
+ dst = addr;
+ }
+
+ edmac_fill_desc(tsf_desc, src, dst, length, num);
+
+ since += length;
+ if (since >= period_len) {
+ plli = (edmac_lli *)((unsigned long)tsf_desc->llis_vaddr + (num) * sizeof(edmac_lli));
+ plli->config |= EDMAC_CXCONFIG_ITC_EN << EDMAC_CXCONFIG_ITC_EN_SHIFT;
+ since -= period_len;
+ }
+ addr += length;
+ total -= length;
+ num++;
+ } while (total);
+
+ last_plli = (edmac_lli *)((unsigned long)tsf_desc->llis_vaddr + (num - 1) * sizeof(edmac_lli));
+
+ /* close the ring: link back to the first LLI by bus address */
+ last_plli->next_lli = (tsf_desc->llis_busaddr & (~(EDMAC_LLI_ALIGN - 1))) | EDMAC_LLI_ENABLE;
+
+ dump_lli(tsf_desc->llis_vaddr, num);
+
+ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags);
+}
+
+static void edmac_phy_reassign(struct edmac_phy_chan *phy_chan,
+ struct edmac_dma_chan *chan)
+{
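+ /*
+ * Hand the just-freed physical channel straight to a waiting virtual
+ * channel; the caller holds next->virt_chan.lock (see edmac_phy_free()).
+ */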
phy_chan->serving = chan; + chan->phychan = phy_chan; + chan->state = EDMAC_CHAN_RUNNING; + + edmac_start_next_txd(chan); +} + +static void edmac_terminate_phy_chan(struct edmac_driver_data *edmac, + struct edmac_dma_chan *edmac_dma_chan) +{ + unsigned int val; + struct edmac_phy_chan *phychan = edmac_dma_chan->phychan; + + edmac_pause_phy_chan(edmac_dma_chan); + + val = 0x1 << phychan->id; + + edmac_writel(val, edmac->base + EDMAC_INT_TC1_RAW); + edmac_writel(val, edmac->base + EDMAC_INT_ERR1_RAW); + edmac_writel(val, edmac->base + EDMAC_INT_ERR2_RAW); +} + +void edmac_phy_free(struct edmac_dma_chan *chan) +{ + struct edmac_driver_data *edmac = chan->host; + struct edmac_dma_chan *p = NULL; + struct edmac_dma_chan *next = NULL; + + list_for_each_entry(p, &edmac->memcpy.channels, virt_chan.chan.device_node) { + if (p->state == EDMAC_CHAN_WAITING) { + next = p; + break; + } + } + + if (!next) { + list_for_each_entry(p, &edmac->slave.channels, virt_chan.chan.device_node) { + if (p->state == EDMAC_CHAN_WAITING) { + next = p; + break; + } + } + } + edmac_terminate_phy_chan(edmac, chan); + + if (next) { + spin_lock(&next->virt_chan.lock); + edmac_phy_reassign(chan->phychan, next); + spin_unlock(&next->virt_chan.lock); + } else { + chan->phychan->serving = NULL; + } + + chan->phychan = NULL; + chan->state = EDMAC_CHAN_IDLE; +} + +static irqreturn_t edmac_irq(int irq, void *dev) +{ + struct edmac_driver_data *edmac = (struct edmac_driver_data *)dev; + struct edmac_dma_chan *chan = NULL; + struct edmac_phy_chan *phy_chan = NULL; + struct transfer_desc * tsf_desc = NULL; + + u32 mask = 0; + unsigned int channel_err_status[3]; + unsigned int channel_status = 0; + unsigned int temp = 0; + unsigned int channel_tc_status = -1; + unsigned int i = 0; + + channel_status = edmac_readl(edmac->base + EDMAC_INT_STAT); + + if (!channel_status) { + edmac_error("channel_status = 0x%x\n", channel_status); + return IRQ_NONE; + } + + for (i = 0; i < edmac->channels; i++) { + temp = (channel_status >> i) & 0x1; + if (temp) { + phy_chan = &edmac->phy_chans[i]; + chan = phy_chan->serving; + if (!chan) { + edmac_error("error interrupt on chan: %d!\n", i); + continue; + } + tsf_desc = chan->at; + + channel_tc_status = edmac_readl(edmac->base + EDMAC_INT_TC1_RAW); + channel_tc_status = (channel_tc_status >> i) & 0x01; + if (channel_tc_status) { + edmac_writel(channel_tc_status << i, edmac->base + EDMAC_INT_TC1_RAW); + } + + channel_tc_status = edmac_readl(edmac->base + EDMAC_INT_TC2); + channel_tc_status = (channel_tc_status >> i) & 0x01; + if (channel_tc_status) { + edmac_writel(channel_tc_status << i, edmac->base + EDMAC_INT_TC2_RAW); + } + + channel_err_status[0] = edmac_readl(edmac->base + EDMAC_INT_ERR1); + channel_err_status[0] = (channel_err_status[0] >> i) & 0x01; + channel_err_status[1] = edmac_readl(edmac->base + EDMAC_INT_ERR2); + channel_err_status[1] = (channel_err_status[1] >> i) & 0x01; + channel_err_status[2] = edmac_readl(edmac->base + EDMAC_INT_ERR3); + channel_err_status[2] = (channel_err_status[2] >> i) & 0x01; + if (channel_err_status[0] | channel_err_status[1] | channel_err_status[2]) { + edmac_error("Error in edmac %d finish!,ERR1 = 0x%x,ERR2 = 0x%x,ERR3 = 0x%x\n", + i, channel_err_status[0], channel_err_status[1], channel_err_status[2]); + edmac_writel(1 << i, edmac->base + EDMAC_INT_ERR1_RAW); + edmac_writel(1 << i, edmac->base + EDMAC_INT_ERR2_RAW); + edmac_writel(1 << i, edmac->base + EDMAC_INT_ERR3_RAW); + } + + spin_lock(&chan->virt_chan.lock); + + if (tsf_desc->cyclic) { + 
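+ /*
+ * Cyclic descriptors are never completed: signal the period
+ * callback and leave the hardware looping over the LLI ring.
+ */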
vchan_cyclic_callback(&tsf_desc->virt_desc); + spin_unlock(&chan->virt_chan.lock); + continue; + } + chan->at = NULL; + tsf_desc->done = true; + vchan_cookie_complete(&tsf_desc->virt_desc); + + if (vchan_next_desc(&chan->virt_chan)) { + edmac_start_next_txd(chan); + } else { + edmac_phy_free(chan); + } + + spin_unlock(&chan->virt_chan.lock); + mask |= (1 << i); + } + } + + return mask ? IRQ_HANDLED : IRQ_NONE; +} + +static void edmac_dma_slave_init(struct edmac_dma_chan *chan) +{ + chan->slave = true; +} + +static void edmac_desc_free(struct virt_dma_desc *vd) +{ + struct transfer_desc *tsf_desc = to_edmac_transfer_desc(&vd->tx); + struct edmac_dma_chan * edmac_dma_chan = to_edamc_chan(vd->tx.chan); + + dma_descriptor_unmap(&vd->tx); + edmac_free_tsf_desc(edmac_dma_chan->host, tsf_desc); +} + +static int edmac_init_virt_channels(struct edmac_driver_data *edmac, + struct dma_device *dmadev, unsigned int channels, bool slave) +{ + struct edmac_dma_chan *chan = NULL; + int i; + INIT_LIST_HEAD(&dmadev->channels); + + for (i = 0; i < channels; i++) { + chan = kzalloc(sizeof(struct edmac_dma_chan), GFP_KERNEL); + if (!chan) { + edmac_error("fail to allocate memory for virt channels!"); + return -1; + } + + chan->host = edmac; + chan->state = EDMAC_CHAN_IDLE; + chan->signal = -1; + + if (slave) { + chan->id = i; + edmac_dma_slave_init(chan); + } + chan->virt_chan.desc_free = edmac_desc_free; + vchan_init(&chan->virt_chan, dmadev); + } + return 0; +} + +void edmac_free_virt_channels(struct dma_device *dmadev) +{ + struct edmac_dma_chan *chan = NULL; + struct edmac_dma_chan *next = NULL; + + list_for_each_entry_safe(chan, + next, &dmadev->channels, virt_chan.chan.device_node) { + list_del(&chan->virt_chan.chan.device_node); + kfree(chan); + } +} + + +#define MAX_TSFR_LLIS 32 +#define EDMACV300_LLI_WORDS 16 +#define EDMACV300_POOL_ALIGN 16 + +static int __init edmac_probe(struct platform_device *pdev) +{ + + int ret = 0, i = 0; + struct edmac_driver_data *edmac = NULL; + size_t trasfer_size = 0; + + ret = dma_set_mask_and_coherent(&(pdev->dev), DMA_BIT_MASK(64)); + if (ret) { + return ret; + } + + edmac = kzalloc(sizeof(*edmac), GFP_KERNEL); + if (!edmac) { + edmac_error("malloc for edmac fail!"); + ret = -ENOMEM; + return ret; + } + edmac->dev = pdev; + + ret = get_of_probe(edmac); + if (ret) { + edmac_error("get dts info fail!"); + goto free_edmac; + } + + + clk_prepare_enable(edmac->clk); + clk_prepare_enable(edmac->axi_clk); + + reset_control_deassert(edmac->rstc); + + dma_cap_set(DMA_MEMCPY, edmac->memcpy.cap_mask); + edmac->memcpy.dev = &pdev->dev; + edmac->memcpy.device_free_chan_resources = edmac_free_chan_resources; + edmac->memcpy.device_prep_dma_memcpy = edmac_prep_dma_memcpy; + edmac->memcpy.device_tx_status = edmac_tx_status; + edmac->memcpy.device_issue_pending = edmac_issue_pending; + edmac->memcpy.device_config = edmac_config; + edmac->memcpy.device_pause = edmac_pause; + edmac->memcpy.device_resume = edmac_resume; + edmac->memcpy.device_terminate_all = edmac_terminate_all; + edmac->memcpy.directions = BIT(DMA_MEM_TO_MEM); + edmac->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + + dma_cap_set(DMA_SLAVE, edmac->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, edmac->slave.cap_mask); + edmac->slave.dev = &pdev->dev; + edmac->slave.device_free_chan_resources = edmac_free_chan_resources; + edmac->slave.device_tx_status = edmac_tx_status; + edmac->slave.device_issue_pending = edmac_issue_pending; + edmac->slave.device_prep_slave_sg = edmac_perp_slave_sg; + 
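+ /*
+ * Illustrative slave usage from a client driver (the device, the "tx"
+ * name and the FIFO address are examples, not part of this driver):
+ *
+ *     chan = dma_request_chan(dev, "tx");
+ *     memset(&cfg, 0, sizeof(cfg));
+ *     cfg.dst_addr = fifo_phys_addr;
+ *     cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ *     cfg.dst_maxburst = 4;
+ *     dmaengine_slave_config(chan, &cfg);
+ *     desc = dmaengine_prep_slave_sg(chan, sgl, nents,
+ *                                    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *     dmaengine_submit(desc);
+ *     dma_async_issue_pending(chan);
+ */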
edmac->slave.device_prep_dma_cyclic = edma_prep_dma_cyclic; + edmac->slave.device_config = edmac_config; + edmac->slave.device_resume = edmac_resume; + edmac->slave.device_pause = edmac_pause; + edmac->slave.device_terminate_all = edmac_terminate_all; + edmac->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + edmac->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + + edmac->max_transfer_size = MAX_TRANSFER_BYTES; + trasfer_size = MAX_TSFR_LLIS * EDMACV300_LLI_WORDS * sizeof(u32); + + edmac->pool = dma_pool_create(DRIVER_NAME, &(pdev->dev), + trasfer_size, EDMACV300_POOL_ALIGN, 0); + if (!edmac->pool) { + edmac_error("create pool fail!"); + ret = -ENOMEM; + goto free_edmac; + } + + edmac_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC1_RAW); + edmac_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC2_RAW); + edmac_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR1_RAW); + edmac_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR2_RAW); + edmac_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR3_RAW); + + edmac_writel(EDMAC_INT_ENABLE_ALL_CHAN, edmac->base + EDMAC_INT_TC1_MASK); + edmac_writel(EDMAC_INT_ENABLE_ALL_CHAN, edmac->base + EDMAC_INT_TC2_MASK); + edmac_writel(EDMAC_INT_ENABLE_ALL_CHAN, edmac->base + EDMAC_INT_ERR1_MASK); + edmac_writel(EDMAC_INT_ENABLE_ALL_CHAN, edmac->base + EDMAC_INT_ERR2_MASK); + edmac_writel(EDMAC_INT_ENABLE_ALL_CHAN, edmac->base + EDMAC_INT_ERR3_MASK); + + ret = request_irq(edmac->irq, edmac_irq, 0, DRIVER_NAME, edmac); + if (ret) { + edmac_error("fail to request irq"); + goto free_pool; + } + + edmac->phy_chans = kzalloc((edmac->channels * sizeof(struct edmac_phy_chan)), + GFP_KERNEL); + if (!edmac->phy_chans) { + edmac_error("malloc for phy chans fail!"); + ret = -ENOMEM; + goto free_irq_res; + } + + /* initialize the phy chan */ + for (i = 0; i < edmac->channels; i++) { + struct edmac_phy_chan* phy_ch = &edmac->phy_chans[i]; + phy_ch->id = i; + phy_ch->base = edmac->base + EDMAC_Cx_BASE(i); + spin_lock_init(&phy_ch->lock); + phy_ch->serving = NULL; + } + + /* initialize the memory virt chan */ + ret = edmac_init_virt_channels(edmac, &edmac->memcpy, edmac->channels, false); + if (ret) { + edmac_error("fail to init memory virt channels!"); + goto free_phychans; + } + + /* initialize the slave virt chan */ + ret = edmac_init_virt_channels(edmac, &edmac->slave, edmac->slave_requests, true); + if (ret) { + edmac_error("fail to init slave virt channels!"); + goto free_memory_virt_channels; + + } + + ret = dma_async_device_register(&edmac->memcpy); + if (ret) { + edmac_error( + "%s failed to register memcpy as an async device - %d\n", + __func__, ret); + goto free_slave_virt_channels; + } + + ret = dma_async_device_register(&edmac->slave); + if (ret) { + edmac_error( + "%s failed to register slave as an async device - %d\n", + __func__, ret); + goto free_memcpy_device; + } + + return 0; + +free_memcpy_device: + dma_async_device_unregister(&edmac->memcpy); +free_slave_virt_channels: + edmac_free_virt_channels(&edmac->slave); +free_memory_virt_channels: + edmac_free_virt_channels(&edmac->memcpy); +free_phychans: + kfree(edmac->phy_chans); +free_irq_res: + free_irq(edmac->irq, edmac); +free_pool: + dma_pool_destroy(edmac->pool); +free_edmac: + kfree(edmac); + + return ret; +} + + +static int edma_remove(struct platform_device *pdev) +{ + int err = 0; + return err; +} + + +static const struct of_device_id edmac_match[] = { + { .compatible = "goke,edmac" }, + {}, +}; + + +static struct platform_driver edmac_driver = { + 
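+ /*
+ * No .probe member here: edmac_probe() is __init and is passed to
+ * platform_driver_probe() in edmac_init() below.
+ */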
.remove = edma_remove, + .driver = { + .name = "edmac", + .of_match_table = edmac_match, + }, +}; + +static int __init edmac_init(void) +{ + return platform_driver_probe(&edmac_driver, edmac_probe); +} +subsys_initcall(edmac_init); + +static void __exit edmac_exit(void) +{ + platform_driver_unregister(&edmac_driver); +} +module_exit(edmac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Goke");