sfc: push partner queue for skb->xmit_more
Author:     Martin Habets <mhabets@solarflare.com>
AuthorDate: Mon, 2 Nov 2015 12:51:31 +0000 (12:51 +0000)
Commit:     Sasha Levin <sasha.levin@oracle.com>
CommitDate: Mon, 14 Dec 2015 17:17:29 +0000 (12:17 -0500)
[ Upstream commit b2663a4f30e85ec606b806f5135413e6d5c78d1e ]

When the IP stack passes SKBs, the sfc driver places each one on one of two
TX queues (called partners): one for checksummed packets and one for
unchecksummed packets. If the SKB has xmit_more set, the driver delays
pushing the work to the NIC.

When the driver later decides to push the buffers, this patch ensures it
also pushes the partner queue if that queue has delayed work of its own.
Before this fix, the work on the partner queue could be left unpushed for a
long time and trigger a netdev watchdog timeout.

Fixes: 70b33fb ("sfc: add support for skb->xmit_more")
Reported-by: Jianlin Shi <jishi@redhat.com>
Signed-off-by: Martin Habets <mhabets@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c

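For context on the "partner" relationship used below: the sfc driver of this
era kept each channel's checksummed and unchecksummed TX queues adjacent in
an array, and efx_tx_queue_partner() (already present in net_driver.h and
used by this patch) maps one to the other. Quoted from memory as an
illustration, not part of this patch, it was essentially:

	/* Get the partner of a TX queue, seen as part of the same net core queue */
	static inline struct efx_tx_queue *
	efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
	{
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			return tx_queue - EFX_TXQ_TYPE_OFFLOAD;	/* offload -> plain */
		else
			return tx_queue + EFX_TXQ_TYPE_OFFLOAD;	/* plain -> offload */
	}
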
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index a77f05ce832596d0c8eb10ef3a9735f48ba64b55..63ec209cdfd381e8a669ba72b2c72fbbf0dbcc07 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1344,7 +1344,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
        unsigned int write_ptr;
        efx_qword_t *txd;
 
-       BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+       tx_queue->xmit_more_available = false;
+       if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+               return;
 
        do {
                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6859437b59fbb688e7e9188b96bdf5a53811a767..b70b865fd19bf694858f511eef2d83d398c4a89a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -316,7 +316,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
        unsigned write_ptr;
        unsigned old_write_count = tx_queue->write_count;
 
-       BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+       tx_queue->xmit_more_available = false;
+       if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+               return;
 
        do {
                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ede32064685fd62834d0626156a2bab8a90c291..eda39c82f7e77fc28d93c490d3139f5ee574935c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -218,6 +218,7 @@ struct efx_tx_buffer {
  * @tso_packets: Number of packets via the TSO xmit path
  * @pushes: Number of times the TX push feature has been used
  * @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
  * @empty_read_count: If the completion path has seen the queue as empty
  *     and the transmission path has not yet checked this, the value of
  *     @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -250,6 +251,7 @@ struct efx_tx_queue {
        unsigned int tso_packets;
        unsigned int pushes;
        unsigned int pio_packets;
+       bool xmit_more_available;
        /* Statistics to supplement MAC stats */
        unsigned long tx_packets;
 
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5db7472bdef518c4aa4c510612ef7..e70edc3dea7e083f84142210f8b834e200e45b72 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -431,8 +431,20 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        efx_tx_maybe_stop_queue(tx_queue);
 
        /* Pass off to hardware */
-       if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+       if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+               struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+               /* There could be packets left on the partner queue if those
+                * SKBs had skb->xmit_more set. If we do not push those they
+                * could be left for a long time and cause a netdev watchdog.
+                */
+               if (txq2->xmit_more_available)
+                       efx_nic_push_buffers(txq2);
+
                efx_nic_push_buffers(tx_queue);
+       } else {
+               tx_queue->xmit_more_available = skb->xmit_more;
+       }
 
        tx_queue->tx_packets++;
 
@@ -721,6 +733,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+       tx_queue->xmit_more_available = false;
 
        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);
@@ -746,6 +759,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
                ++tx_queue->read_count;
        }
+       tx_queue->xmit_more_available = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
@@ -1301,8 +1315,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        efx_tx_maybe_stop_queue(tx_queue);
 
        /* Pass off to hardware */
-       if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+       if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+               struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+               /* There could be packets left on the partner queue if those
+                * SKBs had skb->xmit_more set. If we do not push those they
+                * could be left for a long time and cause a netdev watchdog.
+                */
+               if (txq2->xmit_more_available)
+                       efx_nic_push_buffers(txq2);
+
                efx_nic_push_buffers(tx_queue);
+       } else {
+               tx_queue->xmit_more_available = skb->xmit_more;
+       }
 
        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;