firmware/br-ext-chip-goke/board/gk7205v200/kernel/patches/00_drivers-net-ethernet-gok...

--- linux-4.9.37/drivers/net/ethernet/goke/femac/femac.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-4.9.y/drivers/net/ethernet/goke/femac/femac.c 2021-06-07 13:01:33.000000000 +0300
@@ -0,0 +1,1560 @@
+/*
+ * Copyright (c) Hunan Goke,Chengdu Goke,Shandong Goke. 2021. All rights reserved.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <net/protocol.h>
+
+#include "phy_fix.h"
+#include "femac.h"
+#include "util.h"
+
+static void femac_irq_enable(const struct femac_priv *priv, u32 irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void femac_irq_disable(const struct femac_priv *priv, u32 irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
+}
+
+#ifdef CONFIG_FEPHY_OPT
+static u32 highflag = 0;
+static u32 lowflag = 0;
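+
+/*
+ * Re-trim the PHY analog parameters as the die temperature drifts.
+ * highflag/lowflag give the thresholds hysteresis, so the trim value
+ * is only stepped once per threshold crossing rather than on every
+ * watchdog pass.
+ */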
+static void femac_trim_phy(struct phy_device *phy_dev, u32 val)
+{
+ u32 val1;
+ /* trim lookup table: 31 entries listed, index 31 is zero-initialized */
+ int table[32] = {0x11, 0x10, 0x10, 0xf, 0xe, 0xd, 0xd, 0xc,
+ 0xb, 0xa, 0xa, 0x9, 0x8, 0x7, 0x7, 0x6,
+ 0x5, 0x5, 0x4, 0x3, 0x2, 0x2, 0x1, 0x0,
+ 0x3f, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a
+ };
+
+ phy_write(phy_dev, MII_EXPMA, FEPHY_TRIM_CACHE);
+ phy_write(phy_dev, MII_EXPMD, val);
+ val &= 0x1f;
+ val1 = table[val];
+ phy_write(phy_dev, MII_EXPMA, FEPHY_TRIM_VALUE);
+ val = phy_read(phy_dev, MII_EXPMD);
+ val = (val1 << 2) | (val & 0x3); /* shift left 2 bits */
+ phy_write(phy_dev, MII_EXPMA, FEPHY_TRIM_VALUE);
+ phy_write(phy_dev, MII_EXPMD, val);
+}
+
+static void femac_trim(struct net_device *dev)
+{
+ struct phy_device *phy_dev = NULL;
+ int temp;
+ u32 val;
+
+ phy_dev = dev->phydev;
+ if (phy_dev == NULL) {
+ pr_err("get phy device failed \n");
+ return;
+ }
+
+ phy_write(phy_dev, MII_EXPMA, FEPHY_TRIM_CACHE);
+ val = phy_read(phy_dev, MII_EXPMD);
+ temp = regval_to_temp(val);
+ if ((temp > HIGH_TEMP) && (highflag == 0)) {
+ highflag = 1;
+ if ((val & 0x1f) > 1)
+ val = (val & 0xe0) | ((val & 0x1f) - 1);
+ else
+ return;
+ femac_trim_phy(phy_dev, val);
+ }
+ if ((temp < NORMAL_TEMP1) && (highflag == 1)) {
+ highflag = 0;
+ if ((val & 0x1f) < 0x1f)
+ val = (val & 0xe0) | ((val & 0x1f) + 1);
+ else
+ return;
+ femac_trim_phy(phy_dev, val);
+ }
+ if ((temp > NORMAL_TEMP2) && (lowflag == 0)) {
+ lowflag = 1;
+ if ((val & 0x1f) > 1)
+ val = (val & 0xe0) | ((val & 0x1f) - 1);
+ else
+ return;
+ femac_trim_phy(phy_dev, val);
+ }
+ if ((temp < LOW_TEMP) && (lowflag == 1)) {
+ lowflag = 0;
+ if ((val & 0x1f) < 0x1f)
+ val = (val & 0xe0) | ((val & 0x1f) + 1);
+ else
+ return;
+ femac_trim_phy(phy_dev, val);
+ }
+}
+
+static void femac_watchdog(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct femac_priv *priv = container_of(dwork, struct femac_priv, watchdog_queue);
+ void __iomem *sys_reg_addr;
+ struct net_device *dev = NULL;
+ u32 val;
+
+ dev = priv->ndev;
+ if (dev == NULL) {
+ pr_err("get net device failed \n");
+ return;
+ }
+
+ sys_reg_addr = ioremap_nocache(SYS_REG_ADDR, 0x100);
+ if (!sys_reg_addr) {
+ pr_err("iomap failed \n");
+ return;
+ }
+ val = readl(sys_reg_addr + MISC_CTRL45);
+ if ((val >> 30) != 0x3) { /* bits [31:30] */
+ val |= TSENSOR_EN;
+ writel(val, sys_reg_addr + MISC_CTRL45);
+ mdelay(10); /* wait 10ms */
+ }
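+ /*
+ * Four tsensor samples are packed two per register (the low and
+ * high 16 bits of MISC_CTRL47/48); average them for a stable value.
+ */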
+ val = readl(sys_reg_addr + MISC_CTRL47) & TSENSOR_RESULT0;
+ /* high 16bit */
+ val += (readl(sys_reg_addr + MISC_CTRL47) & TSENSOR_RESULT1) >> 16;
+ val += readl(sys_reg_addr + MISC_CTRL48) & TSENSOR_RESULT2;
+ /* high 16bit */
+ val += (readl(sys_reg_addr + MISC_CTRL48) & TSENSOR_RESULT3) >> 16;
+ val = val / 4; /* average value of the 4 values */
+ if (val < LOW_TEM_VALUE || val > HIGH_TEM_VALUE) {
+ goto out;
+ }
+ femac_trim(dev);
+out:
+ iounmap(sys_reg_addr);
+ schedule_delayed_work(&priv->watchdog_queue, FEPHY_OPT_TIMER);
+}
+#endif
+
+static void femac_tx_sg_dma_unmap(const struct femac_priv *priv,
+ const struct sk_buff *skb, unsigned int pos)
+{
+ struct tx_desc *desc_cur;
+ dma_addr_t addr;
+ u32 len;
+ int i;
+
+ desc_cur = priv->tx_ring.desc + pos;
+
+ addr = desc_cur->linear_addr;
+ len = desc_cur->linear_len;
+ dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ addr = desc_cur->frags[i].addr;
+ len = desc_cur->frags[i].size;
+ dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
+static void femac_tx_dma_unmap(const struct femac_priv *priv,
+ const struct sk_buff *skb, unsigned int pos)
+{
+ if (!(skb_is_gso(skb) || skb_shinfo(skb)->nr_frags)) {
+ dma_addr_t dma_addr;
+
+ dma_addr = priv->txq.dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+ } else {
+ femac_tx_sg_dma_unmap(priv, skb, pos);
+ }
+}
+
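+/*
+ * TX completion: ADDRQ_STAT reports how many frames the MAC still
+ * owns. Everything between that hardware count and our software
+ * tx_fifo_used_cnt has been sent and can be unmapped and freed.
+ */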
+static void femac_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb = NULL;
+ struct femac_priv *priv = netdev_priv(dev);
+ struct femac_queue *txq = &priv->txq;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
+ u32 val;
+
+ netif_tx_lock(dev);
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ while (val < priv->tx_fifo_used_cnt) {
+ skb = txq->skb[txq->tail];
+ if (unlikely(skb == NULL)) {
+ netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
+ val, priv->tx_fifo_used_cnt);
+ break;
+ }
+ femac_tx_dma_unmap(priv, skb, txq->tail);
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_kfree_skb_any(skb);
+
+ priv->tx_fifo_used_cnt--;
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ txq->skb[txq->tail] = NULL;
+ txq->tail = (txq->tail + 1) % txq->num;
+ }
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
+ netif_wake_queue(dev);
+
+ netif_tx_unlock(dev);
+}
+
+static void femac_get_tso_err_info(const struct femac_priv *priv)
+{
+ unsigned int reg_addr, reg_tx_info, reg_tx_err;
+ unsigned int sg_index;
+ struct tx_desc *sg_desc = NULL;
+ int *sg_word = NULL;
+ int i;
+
+ reg_addr = readl(priv->port_base + TSO_DBG_ADDR);
+ reg_tx_info = readl(priv->port_base + TSO_DBG_TX_INFO);
+ reg_tx_err = readl(priv->port_base + TSO_DBG_TX_ERR);
+
+ WARN(1, "tx err=0x%x, tx_info=0x%x, addr=0x%x\n",
+ reg_tx_err, reg_tx_info, reg_addr);
+
+ sg_index = (reg_addr - priv->tx_ring.dma_phys) / sizeof(struct tx_desc);
+ sg_desc = priv->tx_ring.desc + sg_index;
+ sg_word = (int *)sg_desc;
+ for (i = 0; i < sizeof(struct tx_desc) / sizeof(int); i++)
+ pr_err("%s,%d: sg_desc word[%d]=0x%x\n",
+ __func__, __LINE__, i, sg_word[i]);
+
+ /* restart MAC to transmit next packet */
+ femac_irq_disable(priv, INT_TX_ERR);
+ /*
+ * To let the MAC transmit packets again, read TSO_DBG_STATE and
+ * then re-enable the INT_TX_ERR interrupt.
+ */
+}
+
+static netdev_tx_t femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+
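+/*
+ * Software GSO fallback, used when femac_check_hw_capability() reports
+ * -ENOTSUPP: segment the skb on the CPU and push each resulting frame
+ * through the normal xmit path.
+ */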
+static netdev_tx_t femac_sw_gso(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sk_buff *segs = NULL;
+ struct sk_buff *curr_skb = NULL;
+ netdev_features_t features = dev->features;
+
+ features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO);
+ segs = skb_gso_segment(skb, features);
+ if (IS_ERR_OR_NULL(segs)) {
+ goto drop;
+ }
+
+ do {
+ curr_skb = segs;
+ segs = segs->next;
+ curr_skb->next = NULL;
+ if (femac_net_xmit(curr_skb, dev)) {
+ dev_kfree_skb(curr_skb);
+ while (segs != NULL) {
+ curr_skb = segs;
+ segs = segs->next;
+ curr_skb->next = NULL;
+ dev_kfree_skb_any(curr_skb);
+ }
+ goto drop;
+ }
+ } while (segs != NULL);
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
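+/*
+ * Build the scatter-gather tx_desc for a TSO/fragmented skb: the linear
+ * part and each page fragment are DMA-mapped separately and recorded in
+ * the descriptor so femac_tx_sg_dma_unmap() can undo the mappings.
+ */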
+static int femac_fill_sg_desc(const struct femac_priv *priv,
+ const struct sk_buff *skb, unsigned int pos)
+{
+ struct tx_desc *desc_cur;
+ dma_addr_t addr;
+ int ret;
+ int i;
+
+ desc_cur = priv->tx_ring.desc + pos;
+
+ desc_cur->ipv6_id = ntohl(skb_shinfo(skb)->ip6_frag_id);
+
+ desc_cur->total_len = skb->len;
+ addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ return -EINVAL;
+ }
+ desc_cur->linear_addr = addr;
+ desc_cur->linear_len = skb_headlen(skb);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(priv->dev, addr);
+ if (unlikely(ret)) {
+ return -EINVAL;
+ }
+ desc_cur->frags[i].addr = addr;
+ desc_cur->frags[i].size = len;
+ }
+
+ return 0;
+}
+
+static void femac_adjust_link(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 status = 0;
+
+ if (phy->link)
+ status |= MAC_PORTSET_LINKED;
+ if (phy->duplex == DUPLEX_FULL)
+ status |= MAC_PORTSET_DUPLEX_FULL;
+ if (phy->speed == SPEED_100)
+ status |= MAC_PORTSET_SPEED_100M;
+
+ if ((status != priv->link_status) &&
+ ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
+ writel(status, priv->port_base + MAC_PORTSET);
+ priv->link_status = status;
+ phy_print_status(phy);
+
+ priv->tx_pause_en = phy->pause;
+ femac_set_flow_ctrl(priv);
+ }
+}
+
+static void femac_rx_refill(struct femac_priv *priv)
+{
+ struct femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb = NULL;
+ u32 pos;
+ u32 len;
+ dma_addr_t addr;
+ u32 alloc_rxbuf_align;
+ int reserve_room;
+
+ pos = rxq->head;
+ while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
+ if (!CIRC_SPACE(pos, rxq->tail, rxq->num)) {
+ break;
+ }
+ if (unlikely(rxq->skb[pos])) {
+ netdev_err(priv->ndev, "err skb[%d]=%p\n",
+ pos, rxq->skb[pos]);
+ break;
+ }
+ len = MAX_FRAME_SIZE + RXBUF_ADDR_ALIGN_SIZE;
+ skb = netdev_alloc_skb_ip_align(priv->ndev, len);
+ if (unlikely(skb == NULL)) {
+ break;
+ }
+
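+ /*
+ * Rx buffers are realigned to RXBUF_ADDR_ALIGN_SIZE (presumably a
+ * DMA requirement of this MAC): netdev_alloc_skb_ip_align() already
+ * reserved NET_IP_ALIGN, so measure the misalignment of the raw
+ * buffer start and reserve the remainder.
+ */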
+ alloc_rxbuf_align = ((uintptr_t)skb->data - NET_IP_ALIGN) &
+ (RXBUF_ADDR_ALIGN_SIZE - 1);
+ if (alloc_rxbuf_align) {
+ reserve_room = RXBUF_ADDR_ALIGN_SIZE -
+ alloc_rxbuf_align;
+ len -= reserve_room;
+ skb_reserve(skb, reserve_room);
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rxq->dma_phys[pos] = addr;
+ rxq->skb[pos] = skb;
+ writel(addr, priv->port_base + IQ_ADDR);
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->head = pos;
+}
+
+#ifdef FEMAC_RX_REFILL_IN_IRQ
+static void femac_recv_queue(struct net_device *dev, struct sk_buff *skb,
+ u32 rx_pkt_info)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ int hdr_csum_done, hdr_csum_err;
+ int payload_csum_done, payload_csum_err;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ if (dev->features & NETIF_F_RXCSUM) {
+ hdr_csum_done =
+ (rx_pkt_info >> BITS_HEADER_DONE_OFFSET) &
+ BITS_HEADER_DONE_MASK;
+ payload_csum_done =
+ (rx_pkt_info >> BITS_PAYLOAD_DONE_OFFSET) &
+ BITS_PAYLOAD_DONE_MASK;
+ hdr_csum_err =
+ (rx_pkt_info >> BITS_HEADER_ERR_OFFSET) &
+ BITS_HEADER_ERR_MASK;
+ payload_csum_err =
+ (rx_pkt_info >> BITS_PAYLOAD_ERR_OFFSET) &
+ BITS_PAYLOAD_ERR_MASK;
+
+ if (hdr_csum_done && payload_csum_done) {
+ if (unlikely(hdr_csum_err)) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
+ dev_kfree_skb_any(skb);
+ return;
+ } else if (!payload_csum_err) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ }
+ skb_queue_tail(&priv->rx_head, skb);
+}
+
+static void femac_pre_receive(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb = NULL;
+ u32 rx_pkt_info, pos, len;
+ unsigned long rxflags;
+
+ spin_lock_irqsave(&priv->rxlock, rxflags);
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ skb = rxq->skb[pos];
+ if (unlikely(skb == NULL)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ dma_unmap_single(priv->dev, rxq->dma_phys[pos], MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ femac_recv_queue(dev, skb, rx_pkt_info);
+next:
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ femac_rx_refill(priv);
+ spin_unlock_irqrestore(&priv->rxlock, rxflags);
+}
+
+static u32 femac_rx(struct net_device *dev, int limit)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb = skb_dequeue(&priv->rx_head);
+ u32 rx_pkts_num = 0;
+
+ while (skb != NULL) {
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ rx_pkts_num++;
+
+ if (rx_pkts_num >= limit) {
+ break;
+ }
+ skb = skb_dequeue(&priv->rx_head);
+ }
+
+ return rx_pkts_num;
+}
+#else
+static int femac_recv_queue(struct net_device *dev, struct sk_buff *skb,
+ u32 rx_pkt_info)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ int hdr_csum_done, hdr_csum_err;
+ int payload_csum_done, payload_csum_err;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ if (dev->features & NETIF_F_RXCSUM) {
+ hdr_csum_done =
+ (rx_pkt_info >> BITS_HEADER_DONE_OFFSET) &
+ BITS_HEADER_DONE_MASK;
+ payload_csum_done =
+ (rx_pkt_info >> BITS_PAYLOAD_DONE_OFFSET) &
+ BITS_PAYLOAD_DONE_MASK;
+ hdr_csum_err =
+ (rx_pkt_info >> BITS_HEADER_ERR_OFFSET) &
+ BITS_HEADER_ERR_MASK;
+ payload_csum_err =
+ (rx_pkt_info >> BITS_PAYLOAD_ERR_OFFSET) &
+ BITS_PAYLOAD_ERR_MASK;
+
+ if (hdr_csum_done && payload_csum_done) {
+ if (unlikely(hdr_csum_err)) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
+ dev_kfree_skb_any(skb);
+ return -1;
+ } else if (!payload_csum_err) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ }
+ return 0;
+}
+
+static u32 femac_rx(struct net_device *dev, int limit)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ u32 rx_pkt_info, pos, len;
+ u32 rx_pkts_num = 0;
+
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ rx_pkts_num++;
+
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ dma_unmap_single(priv->dev, rxq->dma_phys[pos], MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ if (femac_recv_queue(dev, skb, rx_pkt_info) < 0)
+ goto next;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next:
+ pos = (pos + 1) % rxq->num;
+ if (rx_pkts_num >= limit) {
+ break;
+ }
+ }
+ rxq->tail = pos;
+
+ femac_rx_refill(priv);
+
+ return rx_pkts_num;
+}
+#endif
+
+static int femac_poll(struct napi_struct *napi, int budget)
+{
+ struct femac_priv *priv = container_of(napi,
+ struct femac_priv, napi);
+ struct net_device *dev = priv->ndev;
+ int work_done = 0;
+ int task = budget;
+ u32 ints, num;
+
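+ /*
+ * Keep polling while new events show up in GLB_IRQ_RAW: re-reading
+ * and clearing the raw status after each pass catches packets that
+ * arrived during processing, so nothing is lost before interrupts
+ * are re-enabled below.
+ */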
+ do {
+#ifdef FEMAC_RX_REFILL_IN_IRQ
+ femac_pre_receive(dev);
+#endif
+ femac_xmit_reclaim(dev);
+ num = femac_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if (work_done >= budget) {
+ break;
+ }
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t femac_interrupt(int irq, void *dev_id)
+{
+ u32 ints;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct femac_priv *priv = netdev_priv(dev);
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ if (likely(ints & DEF_INT_MASK)) {
+#ifdef FEMAC_RX_REFILL_IN_IRQ
+ femac_pre_receive(dev);
+#endif
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ femac_irq_disable(priv, DEF_INT_MASK);
+ napi_schedule(&priv->napi);
+ }
+
+ if (has_tso_cap(priv->hw_cap) && unlikely(ints & INT_TX_ERR))
+ femac_get_tso_err_info(priv);
+
+ return IRQ_HANDLED;
+}
+
+static int femac_init_tx_descriptor_ring(struct femac_priv *priv)
+{
+ priv->tx_ring.desc = dma_zalloc_coherent(priv->dev,
+ TXQ_NUM * sizeof(struct tx_desc), &priv->tx_ring.dma_phys, GFP_KERNEL);
+ if (!priv->tx_ring.desc) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void femac_destroy_tx_descriptor_ring(struct femac_priv *priv)
+{
+ if (priv->tx_ring.desc)
+ dma_free_coherent(priv->dev, TXQ_NUM * sizeof(struct tx_desc),
+ priv->tx_ring.desc, priv->tx_ring.dma_phys);
+ priv->tx_ring.desc = NULL;
+}
+
+static int femac_init_queue(struct device *dev,
+ struct femac_queue *queue, unsigned int num)
+{
+ queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *), GFP_KERNEL);
+ if (queue->skb == NULL) {
+ return -ENOMEM;
+ }
+
+ queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t), GFP_KERNEL);
+ if (queue->dma_phys == NULL) {
+ return -ENOMEM;
+ }
+
+ queue->num = num;
+ queue->head = 0;
+ queue->tail = 0;
+
+ return 0;
+}
+
+static int femac_init_tx_and_rx_queues(struct femac_priv *priv)
+{
+ int ret;
+
+ ret = femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
+ if (ret) {
+ return ret;
+ }
+
+ ret = femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
+ if (ret) {
+ return ret;
+ }
+
+ priv->tx_fifo_used_cnt = 0;
+
+ return 0;
+}
+
+static void femac_free_skb_rings(struct femac_priv *priv)
+{
+ struct femac_queue *txq = &priv->txq;
+ struct femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb = NULL;
+ dma_addr_t dma_addr;
+ u32 pos;
+
+ pos = rxq->tail;
+ while (pos != rxq->head) {
+ skb = rxq->skb[pos];
+ if (unlikely(skb == NULL)) {
+ netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
+ pos, rxq->head);
+ pos = (pos + 1) % rxq->num;
+ continue;
+ }
+
+ dma_addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ rxq->skb[pos] = NULL;
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ pos = txq->tail;
+ while (pos != txq->head) {
+ skb = txq->skb[pos];
+ if (unlikely(skb == NULL)) {
+ netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
+ pos, txq->head);
+ pos = (pos + 1) % txq->num;
+ continue;
+ }
+ femac_tx_dma_unmap(priv, skb, pos);
+ dev_kfree_skb_any(skb);
+ txq->skb[pos] = NULL;
+ pos = (pos + 1) % txq->num;
+ }
+ txq->tail = pos;
+ priv->tx_fifo_used_cnt = 0;
+}
+
+static int femac_set_hw_mac_addr(const struct femac_priv *priv,
+ const unsigned char *mac)
+{
+ u32 reg;
+
+ reg = mac[1] | (mac[0] << 8); /* mac0 is high 8 bits */
+ writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
+ /* addr2 -> bits [31:24], addr3 -> [23:16], addr4 -> [15:8], addr5 -> [7:0] */
+ reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
+
+ return 0;
+}
+
+static int femac_port_reset(const struct femac_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_SOFT_RESET);
+ val |= SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ usleep_range(500, 800); /* wait 500-800us */
+
+ val &= ~SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ return 0;
+}
+
+static int femac_net_open(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+
+ femac_set_hw_mac_addr(priv, dev->dev_addr);
+ /*
+ * Clearing the interrupts drops the first packet the MAC has
+ * received, so do it before refilling the free rx skbs.
+ */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ femac_rx_refill(priv);
+
+ netif_carrier_off(dev);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ priv->link_status = 0;
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
+ if (has_tso_cap(priv->hw_cap))
+ femac_irq_enable(priv, INT_TX_ERR);
+
+ return 0;
+}
+
+static void femac_port_init(struct femac_priv *priv);
+
+static int femac_net_close(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+
+ femac_irq_disable(priv, IRQ_ENA_PORT0);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ /*
+ * Reset the MAC port before freeing the skb rings to avoid a
+ * potential use-after-free.
+ */
+ femac_port_reset(priv);
+ femac_port_init(priv);
+
+ priv->tx_pause_en = false;
+ femac_set_flow_ctrl(priv);
+ femac_free_skb_rings(priv);
+#ifdef FEMAC_RX_REFILL_IN_IRQ
+ skb_queue_purge(&priv->rx_head);
+#endif
+
+ return 0;
+}
+
+static bool femac_net_isready(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct femac_queue *txq = &priv->txq;
+ u32 val;
+
+ val = readl(priv->port_base + ADDRQ_STAT);
+ val &= BIT_TX_READY;
+ if (!val) {
+ femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return false;
+ }
+
+ if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
+ txq->num))) {
+ femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return false;
+ }
+
+ return true;
+}
+
+static netdev_tx_t femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct femac_queue *txq = &priv->txq;
+ dma_addr_t addr;
+ int ret;
+ u32 pkt_info;
+
+ if (!femac_net_isready(dev))
+ return NETDEV_TX_BUSY;
+
+ ret = femac_check_hw_capability(skb);
+ if (unlikely(ret)) {
+ if (ret == -ENOTSUPP)
+ return femac_sw_gso(skb, dev);
+
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ pkt_info = femac_get_pkt_info(skb);
+
+ if (!(skb_is_gso(skb) || skb_shinfo(skb)->nr_frags)) {
+ addr = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ } else {
+ ret = femac_fill_sg_desc(priv, skb, txq->head);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ addr = priv->tx_ring.dma_phys +
+ txq->head * sizeof(struct tx_desc);
+
+ /* Ensure descriptor info is written to memory before configuring hardware */
+ wmb();
+ }
+ txq->dma_phys[txq->head] = addr;
+
+ skb_tx_timestamp(skb);
+
+ txq->skb[txq->head] = skb;
+ txq->head = (txq->head + 1) % txq->num;
+
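+ /*
+ * Write the buffer address first, then the frame info; judging by
+ * this ordering the EQFRM_LEN write is what hands the frame to the
+ * MAC (an inference from the code, not a documented guarantee).
+ */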
+ writel(addr, priv->port_base + EQ_ADDR);
+ writel(pkt_info, priv->port_base + EQFRM_LEN);
+
+ priv->tx_fifo_used_cnt++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static int femac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+ struct sockaddr *skaddr = p;
+
+ if (!is_valid_ether_addr(skaddr->sa_data)) {
+ return -EADDRNOTAVAIL;
+ }
+
+ memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ femac_set_hw_mac_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
+static void femac_enable_hw_addr_filter(const struct femac_priv *priv,
+ unsigned int reg_n, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + glb_mac_h16(reg_n));
+ if (enable) {
+ val |= BIT_MACFLT_ENA;
+ } else {
+ val &= ~BIT_MACFLT_ENA;
+ }
+ writel(val, priv->glb_base + glb_mac_h16(reg_n));
+}
+
+static void femac_set_hw_addr_filter(const struct femac_priv *priv,
+ const unsigned char *addr, unsigned int reg_n)
+{
+ unsigned int high, low;
+ u32 val;
+
+ high = glb_mac_h16(reg_n);
+ low = glb_mac_l32(reg_n);
+ /* addr2 -> bits [31:24], addr3 -> [23:16], addr4 -> [15:8], addr5 -> [7:0] */
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ writel(val, priv->glb_base + low);
+
+ val = readl(priv->glb_base + high);
+ val &= ~MACFLT_HI16_MASK;
+ val |= ((addr[0] << 8) | addr[1]); /* addr0 is high 8 bits */
+ val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
+ writel(val, priv->glb_base + high);
+}
+
+static void femac_set_promisc_mode(const struct femac_priv *priv,
+ bool promisc_mode)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ if (promisc_mode) {
+ val |= FWCTRL_FWALL2CPU;
+ } else {
+ val &= ~FWCTRL_FWALL2CPU;
+ }
+ writel(val, priv->glb_base + GLB_FWCTRL);
+}
+
+/* Handle multiple multicast addresses (perfect filtering) */
+static void femac_set_mc_addr_filter(const struct femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ val |= MACTCTRL_MULTI2CPU;
+ } else {
+ int reg = MAX_UNICAST_ADDRESSES;
+ int i;
+ struct netdev_hw_addr *ha = NULL;
+
+ for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
+ femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_MULTI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+/* Handle multiple unicast addresses (perfect filtering) */
+static void femac_set_uc_addr_filter(const struct femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
+ val |= MACTCTRL_UNI2CPU;
+ } else {
+ int reg = 0;
+ int i;
+ struct netdev_hw_addr *ha = NULL;
+
+ for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
+ femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_uc_addr(ha, dev) {
+ femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_UNI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+static void femac_net_set_rx_mode(struct net_device *dev)
+{
+ struct femac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ femac_set_promisc_mode(priv, true);
+ } else {
+ femac_set_promisc_mode(priv, false);
+ femac_set_mc_addr_filter(priv);
+ femac_set_uc_addr_filter(priv);
+ }
+}
+
+static int femac_net_ioctl(struct net_device *dev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(dev)) {
+ return -EINVAL;
+ }
+
+ if (!dev->phydev) {
+ return -EINVAL;
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifreq, cmd);
+}
+
+static const struct ethtool_ops femac_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_pauseparam = femac_get_pauseparam,
+ .set_pauseparam = femac_set_pauseparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct net_device_ops femac_netdev_ops = {
+ .ndo_open = femac_net_open,
+ .ndo_stop = femac_net_close,
+ .ndo_start_xmit = femac_net_xmit,
+ .ndo_do_ioctl = femac_net_ioctl,
+ .ndo_set_mac_address = femac_set_mac_address,
+ .ndo_set_rx_mode = femac_net_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_features = femac_set_features,
+};
+
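+/*
+ * Keep the pause-frame thresholds sane: each must lie in its allowed
+ * range, and the active threshold must stay strictly below the
+ * deactive one, otherwise both fall back to the defaults.
+ */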
+static void femac_verify_flow_ctrl_args(struct femac_priv *priv)
+{
+ if (priv->tx_pause_active_thresh < FC_ACTIVE_MIN ||
+ priv->tx_pause_active_thresh > FC_ACTIVE_MAX)
+ priv->tx_pause_active_thresh = FC_ACTIVE_DEFAULT;
+
+ if (priv->tx_pause_deactive_thresh < FC_DEACTIVE_MIN ||
+ priv->tx_pause_deactive_thresh > FC_DEACTIVE_MAX)
+ priv->tx_pause_deactive_thresh = FC_DEACTIVE_DEFAULT;
+
+ if (priv->tx_pause_active_thresh >= priv->tx_pause_deactive_thresh) {
+ priv->tx_pause_active_thresh = FC_ACTIVE_DEFAULT;
+ priv->tx_pause_deactive_thresh = FC_DEACTIVE_DEFAULT;
+ }
+}
+
+static void femac_core_reset(const struct femac_priv *priv)
+{
+ reset_control_assert(priv->mac_rst);
+ reset_control_deassert(priv->mac_rst);
+}
+
+static void femac_phy_reset(const struct femac_priv *priv)
+{
+ /*
+ * To make sure the PHY hardware reset succeeds, first hold the
+ * PHY in the deasserted state, then run the full assert/deassert
+ * reset sequence.
+ */
+ reset_control_deassert(priv->phy_rst);
+ femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /*
+ * delay long enough for the reset to take effect; the required
+ * time depends on the PHY hardware
+ */
+ femac_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay some time to ensure later MDIO access */
+ femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static void femac_port_init(struct femac_priv *priv)
+{
+ u32 val;
+
+ /* MAC gets link status info and phy mode by software config */
+ val = MAC_PORTSEL_STAT_CPU;
+ if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
+ val |= MAC_PORTSEL_RMII;
+ writel(val, priv->port_base + MAC_PORTSEL);
+
+ /* clear all interrupt status */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
+
+ if (has_tso_cap(priv->hw_cap)) {
+ /* enable TSO debug for error handle */
+ val = readl(priv->port_base + TSO_DBG_EN);
+ val |= BITS_TSO_DBG_EN;
+ writel(val, priv->port_base + TSO_DBG_EN);
+ }
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
+ val |= FWCTRL_FW2CPU_ENA;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+
+ val = readl(priv->port_base + MAC_SET);
+ val &= ~MAX_FRAME_SIZE_MASK;
+ val |= MAX_FRAME_SIZE;
+ writel(val, priv->port_base + MAC_SET);
+
+ val = RX_COALESCED_TIMER |
+ (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
+ writel(val, priv->port_base + RX_COALESCE_SET);
+
+ val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
+ writel(val, priv->port_base + QLEN_SET);
+
+ femac_set_flow_ctrl(priv);
+}
+
+static int femac_drv_res(struct platform_device *pdev,
+ struct femac_priv *priv)
+{
+ struct resource *res = NULL;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ if (of_device_is_compatible(node, "goke,femac-v2"))
+ priv->hw_cap |= HW_CAP_TSO | HW_CAP_RXCSUM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->port_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->port_base)) {
+ ret = PTR_ERR(priv->port_base);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->glb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->glb_base)) {
+ ret = PTR_ERR(priv->glb_base);
+ return ret;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int femac_drv_mac(struct platform_device *pdev,
+ struct femac_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const char *mac_addr = NULL;
+ struct net_device *ndev = priv->ndev;
+
+ priv->mac_rst = devm_reset_control_get(dev, "mac");
+ if (IS_ERR(priv->mac_rst))
+ return PTR_ERR(priv->mac_rst);
+
+ femac_core_reset(priv);
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr != NULL)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+ return 0;
+}
+
+static struct phy_device *femac_drv_phy(struct platform_device *pdev,
+ struct femac_priv *priv, int *ret)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct phy_device *phy = NULL;
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ *ret = of_property_read_u32_array(node, PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays, DELAYS_NUM);
+ if (*ret)
+ return phy;
+ femac_phy_reset(priv);
+ }
+
+ phy_register_fixups();
+
+ phy = of_phy_get_and_connect(priv->ndev, node, femac_adjust_link);
+ if (phy == NULL) {
+ /* check if a fixed-link is defined in device-tree */
+ if (of_phy_is_fixed_link(node)) {
+ *ret = of_phy_register_fixed_link(node);
+ if (*ret < 0) {
+ dev_err(dev, "cannot regitster fixed link phy %d \n", *ret);
+ return phy;
+ }
+ /*
+ * In case of a fixed link phy, the DT node associated
+ * to the phy is the Ethernet MAC DT node.
+ */
+ phy = of_phy_connect(priv->ndev, of_node_get(node),
+ &femac_adjust_link, 0, of_get_phy_mode(node));
+ if (phy == NULL) {
+ dev_err(dev, "fixed-link did not connect successfully\n");
+ return phy;
+ }
+ } else {
+ dev_err(dev, "connect to PHY failed!\n");
+ *ret = -ENODEV;
+ return phy;
+ }
+ }
+
+ phy->supported |= SUPPORTED_Pause;
+ phy->advertising |= ADVERTISED_Pause;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id, phy_modes(phy->interface));
+
+ return phy;
+}
+
+static void femac_drv_napi(struct platform_device *pdev,
+ struct femac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ ndev->watchdog_timeo = 6 * HZ; /* 6 seconds */
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &femac_netdev_ops;
+ ndev->ethtool_ops = &femac_ethtools_ops;
+ netif_napi_add(ndev, &priv->napi, femac_poll, FEMAC_POLL_WEIGHT);
+
+#ifdef CONFIG_FEPHY_OPT
+ INIT_DELAYED_WORK(&priv->watchdog_queue, femac_watchdog);
+ schedule_delayed_work(&priv->watchdog_queue, FEPHY_OPT_TIMER);
+#endif
+
+ if (has_tso_cap(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO;
+
+ if (has_rxcsum_cap(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features;
+ ndev->vlan_features |= ndev->features;
+
+ device_set_wakeup_capable(priv->dev, true);
+ device_set_wakeup_enable(priv->dev, true);
+
+ priv->tx_pause_en = true;
+ priv->tx_pause_active_thresh = TX_FLOW_CTRL_ACTIVE_THRESHOLD;
+ priv->tx_pause_deactive_thresh = TX_FLOW_CTRL_DEACTIVE_THRESHOLD;
+
+ femac_verify_flow_ctrl_args(priv);
+
+ femac_port_init(priv);
+
+ if (has_rxcsum_cap(priv->hw_cap))
+ femac_enable_rxcsum_drop(priv, true);
+}
+
+static int femac_drv_queues(struct platform_device *pdev,
+ struct femac_priv *priv)
+{
+ int ret;
+
+#ifdef FEMAC_RX_REFILL_IN_IRQ
+ skb_queue_head_init(&priv->rx_head);
+ spin_lock_init(&priv->rxlock);
+#endif
+ ret = femac_init_tx_and_rx_queues(priv);
+ if (ret)
+ return ret;
+
+ if (has_tso_cap(priv->hw_cap)) {
+ ret = femac_init_tx_descriptor_ring(priv);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int femac_drv_register(struct platform_device *pdev,
+ struct femac_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ dev_err(dev, "No irq resource\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, femac_interrupt,
+ IRQF_SHARED, pdev->name, ndev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
+ return ret;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "register_netdev failed!\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int femac_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev = NULL;
+ struct femac_priv *priv = NULL;
+ struct phy_device *phy = NULL;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (ndev == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->ndev = ndev;
+
+ ret = femac_drv_res(pdev, priv);
+ if (ret)
+ goto out_free_netdev;
+
+ ret = femac_drv_mac(pdev, priv);
+ if (ret)
+ goto out_disable_clk;
+
+ phy = femac_drv_phy(pdev, priv, &ret);
+ if (phy == NULL)
+ goto out_disable_clk;
+
+ femac_drv_napi(pdev, priv);
+
+ ret = femac_drv_queues(pdev, priv);
+ if (ret)
+ goto out_disconnect_phy;
+
+ ret = femac_drv_register(pdev, priv);
+ if (ret)
+ goto out_destroy_descriptor;
+
+ return ret;
+
+out_destroy_descriptor:
+ if (has_tso_cap(priv->hw_cap))
+ femac_destroy_tx_descriptor_ring(priv);
+out_disconnect_phy:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(phy);
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int femac_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct femac_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ if (has_tso_cap(priv->hw_cap))
+ femac_destroy_tx_descriptor_ring(priv);
+
+ phy_disconnect(ndev->phydev);
+#ifdef CONFIG_FEPHY_OPT
+ cancel_delayed_work_sync(&priv->watchdog_queue);
+#endif
+ clk_disable_unprepare(priv->clk);
+ free_netdev(ndev);
+
+ phy_unregister_fixups();
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct femac_priv *priv = netdev_priv(ndev);
+
+ disable_irq(ndev->irq);
+ if (netif_running(ndev)) {
+ femac_net_close(ndev);
+ netif_device_detach(ndev);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int femac_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct femac_priv *priv = netdev_priv(ndev);
+
+ clk_prepare_enable(priv->clk);
+ if (priv->phy_rst != NULL)
+ femac_phy_reset(priv);
+
+ if (netif_running(ndev)) {
+ femac_port_init(priv);
+ femac_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+ enable_irq(ndev->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id femac_match[] = {
+ {
+ .compatible = "goke,femac",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, femac_match);
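+
+/*
+ * A minimal sketch of the device-tree node this driver binds to,
+ * inferred from the probe path above. Addresses, phandles and the
+ * reset-delay property name are illustrative assumptions (the real
+ * property string is behind PHY_RESET_DELAYS_PROPERTY):
+ *
+ *	ethernet@... {
+ *		compatible = "goke,femac-v2", "goke,femac";
+ *		reg = <...>, <...>;	(port registers, then glb block)
+ *		interrupts = <...>;
+ *		clocks = <&crg ...>;
+ *		resets = <&crg ...>, <&crg ...>;
+ *		reset-names = "mac", "phy";
+ *		phy-handle = <&phy0>;
+ *		phy-mode = "rmii";
+ *	};
+ */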
+
+static struct platform_driver femac_driver = {
+ .driver = {
+ .name = "femac",
+ .of_match_table = femac_match,
+ },
+ .probe = femac_drv_probe,
+ .remove = femac_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = femac_drv_suspend,
+ .resume = femac_drv_resume,
+#endif
+};
+
+module_platform_driver(femac_driver);
+
+MODULE_DESCRIPTION("Goke Fast Ethernet MAC driver");
+MODULE_AUTHOR("Goke");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:femac");