net/mlx5e: Allow reporting of checksum unnecessary
author     Or Gerlitz <ogerlitz@mellanox.com>
           Mon, 23 Sep 2019 12:40:16 +0000 (12:40 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 1 Oct 2019 06:26:03 +0000 (08:26 +0200)
[ Upstream commit b856df28f9230a47669efbdd57896084caadb2b3 ]

Currently we practically never report checksum unnecessary, because
for all IP packets we take the checksum complete path.

Enable non-default runs which report checksum unnecessary, using
an ethtool private flag. This can be useful for performance evals
and other explorations.
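
For example, assuming the netdevice is named eth0 (the name here is only
illustrative), the flag can be queried and toggled with ethtool's private
flag commands, using the "rx_no_csum_complete" string added below:

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 rx_no_csum_complete on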

Required by a downstream patch which fixes XDP checksum handling.

Fixes: 86994156c736 ("net/mlx5e: XDP fast RX drop bpf programs support")
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

drivers/net/ethernet/mellanox/mlx5/core/en.h
index da52e60d4437c406bc5b1bdb3cec7000b86bea9d..d79e177f89901485afb77f255abdd37b19e954a2 100644
@@ -210,6 +210,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "tx_cqe_moder",
        "rx_cqe_compress",
        "rx_striding_rq",
+       "rx_no_csum_complete",
 };
 
 enum mlx5e_priv_flag {
@@ -217,6 +218,7 @@ enum mlx5e_priv_flag {
        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+       MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable)                 \
@@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp {
 enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_AM,
+       MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 };
 
 struct mlx5e_cq {
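
Only the first line of the MLX5E_SET_PFLAG macro appears in the context
above. As a rough sketch (inferred from how the (1 << n) pflag values are
used in this patch, not the verbatim en.h definitions), the helpers simply
set, clear and test bits in params->pflags:

    /* Sketch: each MLX5E_PFLAG_* value is a single bit in params->pflags */
    #define MLX5E_SET_PFLAG(params, pflag, enable)          \
            do {                                            \
                    if (enable)                             \
                            (params)->pflags |= (pflag);    \
                    else                                    \
                            (params)->pflags &= ~(pflag);   \
            } while (0)

    #define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

This is why mlx5e_open_rq() further down can test params->pflags directly
against MLX5E_PFLAG_RX_NO_CSUM_COMPLETE.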
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2b9350f4c7522bd455dfe91ee4771e9ca6f0e4d9..cb79aaea1a69598612cc0a70cae78b87b57b4e31 100644
@@ -1510,6 +1510,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        return 0;
 }
 
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_channels *channels = &priv->channels;
+       struct mlx5e_channel *c;
+       int i;
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               return 0;
+
+       for (i = 0; i < channels->num; i++) {
+               c = channels->c[i];
+               if (enable)
+                       __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+               else
+                       __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+       }
+
+       return 0;
+}
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
                              u32 wanted_flags,
                              enum mlx5e_priv_flag flag,
@@ -1561,6 +1582,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
        err = mlx5e_handle_pflag(netdev, pflags,
                                 MLX5E_PFLAG_RX_STRIDING_RQ,
                                 set_pflag_rx_striding_rq);
+       if (err)
+               goto out;
+
+       err = mlx5e_handle_pflag(netdev, pflags,
+                                MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+                                set_pflag_rx_no_csum_complete);
 
 out:
        mutex_unlock(&priv->state_lock);
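
set_pflag_rx_no_csum_complete() only flips the per-RQ state bit on channels
that are currently open; making the choice stick is left to the existing
mlx5e_handle_pflag() helper, which records the new value in params->pflags
so that RQs opened later (see the en_main.c hunk below) inherit it. Roughly
(a sketch of that existing helper, not its verbatim source; the
mlx5e_pflag_handler typedef is assumed):

    static int mlx5e_handle_pflag(struct net_device *netdev,
                                  u32 wanted_flags,
                                  enum mlx5e_priv_flag flag,
                                  mlx5e_pflag_handler set_handler)
    {
            struct mlx5e_priv *priv = netdev_priv(netdev);
            bool enable = !!(wanted_flags & flag);
            u32 changes = wanted_flags ^ priv->channels.params.pflags;
            int err;

            if (!(changes & flag))             /* flag unchanged, nothing to do */
                    return 0;

            err = set_handler(netdev, enable); /* e.g. set_pflag_rx_no_csum_complete */
            if (err)
                    return err;

            MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
            return 0;
    }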
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 83ab2c0e6b61fd90d233a97c18dbdd8e60962727..5e98b31620c1796b68d5dfd42bec6c939c3edd14 100644
@@ -934,6 +934,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
+       if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+               __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
        return 0;
 
 err_destroy_rq:
@@ -4533,6 +4536,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
 
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
 
        /* RQ */
        /* Prefer Striding RQ, unless any of the following holds:
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8323534f075a351f2e1cf3bba08993d6b55a9eeb..4851fc575185c7a5e4c2d7956dc3a6b037a28a7a 100644
@@ -754,6 +754,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
+       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+               goto csum_unnecessary;
+
        /* CQE csum doesn't cover padding octets in short ethernet
         * frames. And the pad field is appended prior to calculating
         * and appending the FCS field.
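
When the new RQ state bit is set, the goto skips the CHECKSUM_COMPLETE
computation and lands on the csum_unnecessary label further down in
mlx5e_handle_csum(), beyond the context shown here. Roughly (a sketch of
that fallback, not the verbatim driver code; the CQE field and flag names
are the ones used elsewhere in mlx5), checksum unnecessary is reported only
when the hardware-parsed CQE says both the L3 and L4 checksums were verified:

    csum_unnecessary:
            if (likely((cqe->hds_ip_ext & CQE_L4_OK) &&
                       (cqe->hds_ip_ext & CQE_L3_OK))) {
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                    return;
            }

            /* otherwise fall back to CHECKSUM_NONE and let the stack verify */
            skb->ip_summed = CHECKSUM_NONE;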