From c4f922e86c8e0f7c5fe94e0547e9835fc9711f08 Mon Sep 17 00:00:00 2001
From: Yoshihiro Shimoda
Date: Tue, 12 Sep 2023 10:49:36 +0900
Subject: net: renesas: rswitch: Add spin lock protection for irq {un}mask

Add spin lock protection for the irq {un}mask registers' control.
After napi_complete_done() and this protection were applied, a lot of
redundant interrupts no longer occur.

For example: when "iperf3 -c -R" on R-Car S4-8 Spider

Before the patches were applied: about 800,000 interrupts happened
After the patches were applied: about 100,000 interrupts happened

Fixes: 3590918b5d07 ("net: ethernet: renesas: Add support for "Ethernet Switch"")
Signed-off-by: Yoshihiro Shimoda
Reviewed-by: Simon Horman
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/renesas/rswitch.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 26c8807d7dea..ea9186178091 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -799,6 +799,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
 	struct net_device *ndev = napi->dev;
 	struct rswitch_private *priv;
 	struct rswitch_device *rdev;
+	unsigned long flags;
 	int quota = budget;
 
 	rdev = netdev_priv(ndev);
@@ -817,8 +818,10 @@ retry:
 	netif_wake_subqueue(ndev, 0);
 
 	if (napi_complete_done(napi, budget - quota)) {
+		spin_lock_irqsave(&priv->lock, flags);
 		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
 		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
 out:
@@ -835,8 +838,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
 	struct rswitch_device *rdev = netdev_priv(ndev);
 
 	if (napi_schedule_prep(&rdev->napi)) {
+		spin_lock(&rdev->priv->lock);
 		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
 		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+		spin_unlock(&rdev->priv->lock);
 		__napi_schedule(&rdev->napi);
 	}
 }
@@ -1440,14 +1445,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
 static int rswitch_open(struct net_device *ndev)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
+	unsigned long flags;
 
 	phy_start(ndev->phydev);
 
 	napi_enable(&rdev->napi);
 	netif_start_queue(ndev);
 
+	spin_lock_irqsave(&rdev->priv->lock, flags);
 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+	spin_unlock_irqrestore(&rdev->priv->lock, flags);
 
 	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
 		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
@@ -1461,6 +1469,7 @@ static int rswitch_stop(struct net_device *ndev)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
 	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+	unsigned long flags;
 
 	netif_tx_stop_all_queues(ndev);
 	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
@@ -1476,8 +1485,10 @@ static int rswitch_stop(struct net_device *ndev)
 		kfree(ts_info);
 	}
 
+	spin_lock_irqsave(&rdev->priv->lock, flags);
 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+	spin_unlock_irqrestore(&rdev->priv->lock, flags);
 
 	phy_stop(ndev->phydev);
 	napi_disable(&rdev->napi);
@@ -1887,6 +1898,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
+	spin_lock_init(&priv->lock);
 
 	attr = soc_device_match(rswitch_soc_no_speed_change);
 	if (attr)
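
As a rough illustration of why the hunks use two lock flavors for the same
priv->lock: rswitch_poll(), rswitch_open() and rswitch_stop() run with
interrupts enabled, so they use spin_lock_irqsave(), while
rswitch_queue_interrupt() runs in hard irq context where interrupts are
already disabled, so a plain spin_lock() is sufficient. The sketch below
shows only that locking pattern; demo_priv, demo_enadis_data_irq() and the
single read-modify-write enable register are hypothetical stand-ins, not
the rswitch driver's actual registers or helpers.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_priv {
	spinlock_t lock;	/* serializes updates of the shared enable register */
	void __iomem *irq_en;	/* hypothetical per-queue interrupt-enable register */
};

/* Read-modify-write of the shared register; caller must hold priv->lock. */
static void demo_enadis_data_irq(struct demo_priv *priv, unsigned int index,
				 bool enable)
{
	u32 val = ioread32(priv->irq_en);

	if (enable)
		val |= BIT(index);
	else
		val &= ~BIT(index);
	iowrite32(val, priv->irq_en);
}

/* NAPI poll / ndo_open / ndo_stop style caller: irqs may be on, save flags. */
static void demo_enable_queue_irqs(struct demo_priv *priv, unsigned int tx,
				   unsigned int rx)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	demo_enadis_data_irq(priv, tx, true);
	demo_enadis_data_irq(priv, rx, true);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Hard irq handler style caller: irqs already off, plain spin_lock suffices. */
static void demo_disable_queue_irqs(struct demo_priv *priv, unsigned int tx,
				    unsigned int rx)
{
	spin_lock(&priv->lock);
	demo_enadis_data_irq(priv, tx, false);
	demo_enadis_data_irq(priv, rx, false);
	spin_unlock(&priv->lock);
}

Serializing mask and unmask this way keeps the poll path from racing with
the irq handler over the same enable state, which is the protection the
commit message refers to.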