Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dpll.c  141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c  91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c  92
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c  443
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c  82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c  27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  198
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c  160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  114
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c  83
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c  85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c  28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c  734
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c  115
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  23
80 files changed, 2304 insertions, 1203 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f8f0a712c943..4957412ff1f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
-static int cmd_alloc_index(struct mlx5_cmd *cmd)
+static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
- if (ret < cmd->vars.max_reg_cmds)
+ if (ret < cmd->vars.max_reg_cmds) {
clear_bit(ret, &cmd->vars.bitmask);
+ ent->idx = ret;
+ cmd->ent_arr[ent->idx] = ent;
+ }
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
@@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
down(sem);
if (!ent->page_queue) {
- alloc_ret = cmd_alloc_index(cmd);
+ alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
- ent->idx = alloc_ret;
} else {
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
+ cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
- cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1921,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
+ unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@@ -1928,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
- spin_lock_irq(&stats->lock);
+ spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@@ -1937,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
- spin_unlock_irq(&stats->lock);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
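Two fixes above are worth calling out. First, publishing the entry into cmd->ent_arr now happens inside cmd_alloc_index() (and under alloc_lock in the page-queue branch), so a completion that fires immediately after the index is allocated always finds the entry in place. Second, cmd_status_log() can be reached from contexts where interrupts may already be disabled, so the plain spin_lock_irq()/spin_unlock_irq() pair, which unconditionally re-enables interrupts on unlock, is replaced with the save/restore variants. A minimal sketch of that rule, with a hypothetical helper name:

/* Sketch only: _irqsave is the safe default when the calling context
 * is unknown. spin_unlock_irq() would unconditionally re-enable
 * interrupts, corrupting a caller that already held them disabled;
 * _irqrestore puts back whatever state the caller had.
 */
static void stats_mark_failed(struct mlx5_cmd_stats *stats, int err)
{
	unsigned long flags;	/* saved interrupt state */

	spin_lock_irqsave(&stats->lock, flags);
	stats->failed++;
	if (err < 0)
		stats->last_failed_errno = -err;
	spin_unlock_irqrestore(&stats->lock, flags);
}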
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index cf0477f53dc4..47e7c2639774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
- mlx5_core_warn(dev, "Missing SyncE capability\n");
+ mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
index 28d02749d3c4..7659ad21e6e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
@@ -55,7 +55,10 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data)
ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET,
MLX5_VSC_LOCK);
if (ret) {
- mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
+ if (ret == -EBUSY)
+ mlx5_core_info(dev, "SW reset semaphore is already in use\n");
+ else
+ mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
goto unlock_gw;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 76d27d2ee40c..080e7eab52c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
while (block_timestamp > tracer->last_timestamp) {
/* Check block override if it's not the first block */
- if (!tracer->last_timestamp) {
+ if (tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override by the HW in case of buffer
* wraparound, the time stamp of the previous block
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 2cd81bb32c66..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -36,11 +36,18 @@ static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
return 0;
}
+struct mlx5_dpll_synce_status {
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ bool ho_acq;
+ bool oper_freq_measure;
+ enum mlx5_msees_failure_reason failure_reason;
+ s32 frequency_diff;
+};
+
static int
mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
- enum mlx5_msees_admin_status *admin_status,
- enum mlx5_msees_oper_status *oper_status,
- bool *ho_acq)
+ struct mlx5_dpll_synce_status *synce_status)
{
u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
@@ -50,11 +57,12 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
MLX5_REG_MSEES, 0, 0);
if (err)
return err;
- if (admin_status)
- *admin_status = MLX5_GET(msees_reg, out, admin_status);
- *oper_status = MLX5_GET(msees_reg, out, oper_status);
- if (ho_acq)
- *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
+ synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
+ synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
+ synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -67,21 +75,23 @@ mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
MLX5_SET(msees_reg, in, field_select,
MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
MLX5_SET(msees_reg, in, admin_status, admin_status);
+ MLX5_SET(msees_reg, in, admin_freq_measure, true);
return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MSEES, 0, 1);
}
static enum dpll_lock_status
-mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
{
- switch (oper_status) {
+ switch (synce_status->oper_status) {
case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
fallthrough;
case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
- return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
- DPLL_LOCK_STATUS_LOCKED;
+ return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
case MLX5_MSEES_OPER_STATUS_HOLDOVER:
fallthrough;
case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
@@ -91,32 +101,60 @@ mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
}
}
+static enum dpll_lock_status_error
+mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
+{
+ switch (synce_status->oper_status) {
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
+ switch (synce_status->failure_reason) {
+ case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
+ return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
+ case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
+ return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
+ default:
+ return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
+ }
+ default:
+ return DPLL_LOCK_STATUS_ERROR_NONE;
+ }
+}
+
static enum dpll_pin_state
-mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
- enum mlx5_msees_oper_status oper_status)
+mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
- return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
- (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
- oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
}
-static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
+ s64 *ffo)
{
- enum mlx5_msees_oper_status oper_status;
+ if (!synce_status->oper_freq_measure)
+ return -ENODATA;
+ *ffo = synce_status->frequency_diff;
+ return 0;
+}
+
+static int
+mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
-
- *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ *status = mlx5_dpll_lock_status_get(&synce_status);
+ *status_error = mlx5_dpll_lock_status_error_get(&synce_status);
return 0;
}
@@ -128,18 +166,9 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
return 0;
}
-static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
- void *priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- return mode == DPLL_MODE_MANUAL;
-}
-
static const struct dpll_device_ops mlx5_dpll_device_ops = {
.lock_status_get = mlx5_dpll_device_lock_status_get,
.mode_get = mlx5_dpll_device_mode_get,
- .mode_supported = mlx5_dpll_device_mode_supported,
};
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
@@ -160,16 +189,14 @@ static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
enum dpll_pin_state *state,
struct netlink_ext_ack *extack)
{
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = pin_priv;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, NULL);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
- *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ *state = mlx5_dpll_pin_state_get(&synce_status);
return 0;
}
@@ -188,10 +215,25 @@ static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}
+static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll_synce_status synce_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
+ if (err)
+ return err;
+ return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
+}
+
static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
.direction_get = mlx5_dpll_pin_direction_get,
.state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
.state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+ .ffo_get = mlx5_dpll_ffo_get,
};
static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
@@ -211,19 +253,16 @@ static void mlx5_dpll_periodic_work(struct work_struct *work)
{
struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
work.work);
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
enum dpll_lock_status lock_status;
enum dpll_pin_state pin_state;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
goto err_out;
- lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
- pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ lock_status = mlx5_dpll_lock_status_get(&synce_status);
+ pin_state = mlx5_dpll_pin_state_get(&synce_status);
if (!mdpll->last.valid)
goto invalid_out;
@@ -246,7 +285,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -254,7 +293,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
@@ -374,7 +413,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
struct mlx5_core_dev *mdev = mdpll->mdev;
- cancel_delayed_work(&mdpll->work);
+ cancel_delayed_work_sync(&mdpll->work);
mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
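The dpll changes bundle the MSEES register fields into one mlx5_dpll_synce_status struct, so every caller does a single register read and picks out what it needs, and the remove path switches to cancel_delayed_work_sync(). A sketch of why the _sync variant matters at teardown, with a hypothetical wrapper name:

/* Sketch: cancel_delayed_work() only unqueues; a concurrently running
 * mlx5_dpll_periodic_work() could still dereference mdpll state while
 * the lines below tear it down. The _sync variant waits for it first.
 */
static void mdpll_stop(struct mlx5_dpll *mdpll)
{
	cancel_delayed_work_sync(&mdpll->work);	/* wait, don't just unqueue */
	destroy_workqueue(mdpll->wq);		/* now safe to destroy */
}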
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b2a5da9739d2..55c6ace0acd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -72,7 +72,6 @@ struct page_pool;
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
-#define MLX5E_MAX_NUM_TC 8
#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE
#define MLX5_RX_HEADROOM NET_SKB_PAD
@@ -364,7 +363,7 @@ struct mlx5e_cq {
/* control */
struct net_device *netdev;
struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
+ struct workqueue_struct *workqueue;
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
@@ -484,10 +483,12 @@ struct mlx5e_xdp_info_fifo {
struct mlx5e_xdpsq;
struct mlx5e_xmit_data;
+struct xsk_tx_metadata;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- int);
+ int,
+ struct xsk_tx_metadata *);
struct mlx5e_xdpsq {
/* data path */
@@ -756,7 +757,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_xdpsq rq_xdpsq;
- struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
@@ -806,7 +807,7 @@ struct mlx5e_channels {
struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
struct mlx5e_rq_stats xskrq;
struct mlx5e_xdpsq_stats rq_xdpsq;
@@ -816,8 +817,8 @@ struct mlx5e_channel_stats {
struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
- struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
+ struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
@@ -826,6 +827,7 @@ enum {
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
MLX5E_STATE_XDP_ACTIVE,
+ MLX5E_STATE_CHANNELS_ACTIVE,
};
struct mlx5e_modify_sq_param {
@@ -884,7 +886,6 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
@@ -982,6 +983,8 @@ struct mlx5e_profile {
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch_limit)(struct mlx5_core_dev *mdev);
+ u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv,
+ u8 lag_port, u8 tc);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
@@ -989,6 +992,11 @@ struct mlx5e_profile {
u32 features;
};
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc);
+
#define mlx5e_profile_feature_cap(profile, feature) \
((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
@@ -1036,6 +1044,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
+ struct net_device *netdev;
+ struct workqueue_struct *wq;
struct napi_struct *napi;
struct mlx5e_ch_stats *ch_stats;
int node;
@@ -1043,7 +1053,7 @@ struct mlx5e_create_cq_param {
};
struct mlx5e_cq_param;
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
@@ -1114,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
@@ -1130,8 +1140,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
@@ -1173,9 +1181,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc);
+int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
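The per-priv tisn[][] array is gone; TIS numbers are now resolved through a new profile hook. One plausible shape for the mlx5e_profile_get_tisn() helper declared above, assuming (this is a sketch, not the actual en_main.c body) that profiles without the hook fall back to TISes kept in shared mdev-level resources:

u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
			   struct mlx5e_priv *priv,
			   const struct mlx5e_profile *profile,
			   u8 lag_port, u8 tc)
{
	if (profile->get_tisn)
		return profile->get_tisn(mdev, priv, lag_port, tc);
	/* assumption: fall back to shared, mdev-level TISes; the helper
	 * name below is hypothetical */
	return mlx5e_shared_tisn_get(mdev, lag_port, tc);
}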
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index be83ad9db82a..671adbad0a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
@@ -435,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
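Both hunks close the same latent double free: ft->g was kfree()d on the allocation-failure path but left dangling, so a later cleanup of the flow table would free it again. Nulling the pointer makes the error path idempotent, because kfree(NULL) is a no-op. The pattern in isolation:

	if (!in || !ft->g) {
		kfree(ft->g);
		ft->g = NULL;	/* later kfree(ft->g) is now a harmless no-op */
		kvfree(in);
		return -ENOMEM;
	}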
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 254c84739046..40c8df111754 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -36,7 +36,7 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
index e1ac4b3d22fb..6beba7f075c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
@@ -7,6 +7,5 @@
int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
#endif /* __MLX5_MONITOR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index e097f336e1c4..5757f4f10c12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
* since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
@@ -669,6 +683,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
*ccp = (struct mlx5e_create_cq_param) {
+ .netdev = c->netdev,
+ .wq = c->priv->wq,
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
@@ -1062,8 +1078,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp =
- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
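The sizing refactor replaces the xsk bool with an explicit no_head_tail_room flag: striding-RQ XDP with a non-linear SKB allocates a separate page for the linear part, so neither headroom nor skb_shared_info tailroom belongs in the per-stride size. A condensed sketch of the two branches, mirroring the patch's logic only (helper name hypothetical):

static u32 linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	if (no_head_tail_room)		/* striding RQ + XDP, non-linear SKB */
		return SKB_DATA_ALIGN(hw_mtu);

	/* default: headroom up front, skb_shared_info tailroom folded in
	 * by MLX5_SKB_FRAG_SZ() */
	return MLX5_SKB_FRAG_SZ(mlx5e_get_linear_rq_headroom(params, NULL) +
				hw_mtu);
}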
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index bb11e644d24f..a848a7fda66b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,13 +170,15 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
+ u8 *md_buff,
+ u8 *md_buff_sz,
int budget)
{
struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
- mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+ md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
- struct mlx5_cqwq *cqwq = &cq->wq;
+ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
+ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
+ u8 metadata_buff_sz = 0;
+ struct mlx5_cqwq *cqwq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
+ cqwq = &cq->wq;
+
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
return false;
@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
do {
mlx5_cqwq_pop(cqwq);
- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
+ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
+ metadata_buff, &metadata_buff_sz, napi_budget);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
mlx5_cqwq_update_db_record(cqwq);
@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
/* ensure cq space is freed before enabling more cqes */
wmb();
+ while (metadata_buff_sz > 0)
+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
+ metadata_buff[--metadata_buff_sz]);
+
mlx5e_txqsq_wake(&ptpsq->txqsq);
return work_done == budget;
@@ -506,9 +518,11 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
int txq_ix = ix_base + tc;
+ u32 tisn;
- err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- cparams, tc, &c->ptpsq[tc]);
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
+ err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]);
if (err)
goto close_txqsq;
}
@@ -543,6 +557,8 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
num_tc = mlx5e_get_dcb_num_tc(params);
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -553,7 +569,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_txqsq_cq;
}
@@ -562,7 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_ts_cq;
@@ -590,6 +606,8 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
struct mlx5e_cq_param *cq_param;
struct mlx5e_cq *cq = &c->rq.cq;
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -597,7 +615,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
cq_param = &cparams->rq_param.cqp;
- return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
}
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
@@ -917,6 +935,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -925,8 +944,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
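Three related changes here: the tracker-list locks become _bh variants because the list is touched from both NAPI (softirq) and process context; metadata ids freed while polling the CQ are parked in an on-stack buffer and returned to the freelist only after the barrier that frees CQ space; and the budget is clamped to MLX5E_TX_CQ_POLL_BUDGET so that buffer cannot overflow. The deferred-release core, condensed from the hunks above:

	/* poll loop collects ids instead of pushing immediately:
	 *   md_buff[(*md_buff_sz)++] = metadata_id;
	 */
	mlx5_cqwq_update_db_record(cqwq);
	wmb();			/* free CQ space before recycling ids */
	while (metadata_buff_sz > 0)
		mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
					     metadata_buff[--metadata_buff_sz]);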
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 7b700d0f956a..86f1854698b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -49,7 +49,7 @@ enum {
struct mlx5e_ptp {
/* data path */
- struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_ptpsq ptpsq[MLX5_MAX_NUM_TC];
struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 244bc15a42ab..34adf8c3f81a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -77,6 +77,7 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
struct mlx5e_params *params;
struct mlx5e_channel *c;
struct mlx5e_txqsq *sq;
+ u32 tisn;
params = &chs->params;
@@ -123,11 +124,13 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_cq, 0, sizeof(param_cq));
mlx5e_build_sq_param(priv->mdev, params, &param_sq);
mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
- err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
- err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, hw_id,
+
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0);
+ err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b12fe3c5a258..a55452c69f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -147,6 +147,20 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ u64 dbytes;
+ u64 dpkts;
+
+ dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats);
+ flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
+ FLOW_ACTION_HW_STATS_DELAYED);
+}
+
static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
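mlx5e_tc_stats_matchall() reports deltas, not totals: it subtracts the previously reported baseline, pushes the difference with FLOW_ACTION_HW_STATS_DELAYED, then advances the baseline. The same pattern in a generic form (helper name hypothetical):

static void report_stats_delta(struct flow_stats *dst,
			       u64 pkts, u64 bytes,
			       u64 *base_pkts, u64 *base_bytes)
{
	flow_stats_update(dst, bytes - *base_bytes, pkts - *base_pkts,
			  0, jiffies, FLOW_ACTION_HW_STATS_DELAYED);
	*base_pkts = pkts;	/* the next query starts from here */
	*base_bytes = bytes;
}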
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index fea8c0a5fe89..4358798d6ce1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{
- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_icosq *icosq = rq->icosq;
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {};
+ char icosq_str[32] = {};
err_ctx.ctx = rq;
err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
@@ -505,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
if (icosq)
snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
snprintf(err_str, sizeof(err_str),
- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
+ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index f63402c48028..1b418095b79a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
}
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
- /* attr->dests[].rep is resolved when we handle encap */
+ /* attr->dests[].vport is resolved when we handle encap */
return 0;
}
@@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].vport_valid = true;
+ esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 368a95fa77d3..b14cd62edffc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -48,7 +48,8 @@ mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? TCA_PEDIT_KEY_EX_CMD_SET :
+ TCA_PEDIT_KEY_EX_CMD_ADD;
u8 htype = act->mangle.htype;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 4e923a2874ae..b500cc2c9689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5_flow_spec *spec;
int err;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
struct mlx5e_post_act_handle *handle;
int err;
+ if (IS_ERR(post_act))
+ return ERR_CAST(post_act);
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
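The two guards added above exist because post_act can legitimately be an ERR_PTR cookie: mlx5e_tc_post_act_init() fails with -EOPNOTSUPP when ignore_flow_level is missing, and callers keep the cookie around. Each entry point therefore bails out before dereferencing, propagating the original error. A sketch of the two return flavors (function names hypothetical):

int post_act_do_work(struct mlx5e_post_act *post_act)
{
	if (IS_ERR(post_act))			/* init may have failed */
		return PTR_ERR(post_act);
	/* ... real work ... */
	return 0;
}

struct mlx5e_post_act_handle *post_act_add(struct mlx5e_post_act *post_act)
{
	if (IS_ERR(post_act))
		return ERR_CAST(post_act);	/* keep the same error cookie */
	/* ... allocate and return a handle ... */
	return NULL;
}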
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 00a04fdd756f..8dfb57f712b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -302,6 +302,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -313,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -407,6 +408,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -418,8 +420,8 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv4_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -570,6 +572,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -581,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
@@ -674,6 +677,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
+ encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
@@ -685,8 +689,8 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
- reformat_params.size = ipv6_encap_size;
- reformat_params.data = encap_header;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
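All four tc_tun hunks apply one ownership-transfer rule: once the buffer is stored in the entry, the local encap_header pointer is cleared so the shared error path's kfree(encap_header) frees nothing the entry now owns, and the reformat parameters are read from the entry rather than the surrendered locals. The handoff in a generic form (helper name hypothetical):

static void encap_commit(struct mlx5e_encap_entry *e,
			 char **encap_header, int encap_size)
{
	e->encap_size = encap_size;
	kfree(e->encap_header);		/* drop any stale header (update path) */
	e->encap_header = *encap_header;
	*encap_header = NULL;		/* caller's kfree() is now a no-op */
}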
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index b10e40e1a9c1..f1d1e1542e81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].vport_valid = true;
+ esw_attr->dests[out_index].vport = rpriv->rep->vport;
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
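Here, and in the mirred.c hunk above, a destination stops carrying a representor pointer and instead records just the vport number plus a validity flag, which avoids holding a pointer tied to the representor's lifetime. An illustrative shape only (the real dests entry in eswitch.h has more fields):

struct esw_dest {			/* sketch, not the actual struct */
	struct mlx5_core_dev *mdev;
	u16 vport;			/* replaces: struct mlx5_eswitch_rep *rep */
	bool vport_valid;		/* set where rep used to be assigned */
};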
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 5620d9f97518..ac458a8d10e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -68,11 +68,13 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
node = dev_to_node(mdev->device);
+ ccp.netdev = priv->netdev;
+ ccp.wq = priv->wq;
ccp.node = node;
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
- err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
+ err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 7decc81ed33a..82b5ca1be4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -103,7 +103,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
@@ -145,7 +145,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
@@ -256,9 +256,55 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+ const struct mlx5_cqe64 *cqe = _ctx->cqe;
+
+ if (!cqe_has_vlan(cqe))
+ return -ENODATA;
+
+ *vlan_proto = htons(ETH_P_8021Q);
+ *vlan_tci = be16_to_cpu(cqe->vlan_info);
+ return 0;
+}
+
const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
.xmo_rx_hash = mlx5e_xdp_rx_hash,
+ .xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag,
+};
+
+struct mlx5e_xsk_tx_complete {
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_cq *cq;
+};
+
+static u64 mlx5e_xsk_fill_timestamp(void *_priv)
+{
+ struct mlx5e_xsk_tx_complete *priv = _priv;
+ u64 ts;
+
+ ts = get_cqe_ts(priv->cqe);
+
+ if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
+ return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+
+ return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+}
+
+static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
+{
+ struct mlx5_wqe_eth_seg *eseg = priv;
+
+ /* HW/FW is doing parsing, so offsets are largely ignored. */
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+}
+
+const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
+ .tmo_fill_timestamp = mlx5e_xsk_fill_timestamp,
+ .tmo_request_checksum = mlx5e_xsk_request_checksum,
};
/* returns true if packet was consumed by xdp */
@@ -398,11 +444,11 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result);
+ int check_result, struct xsk_tx_metadata *meta);
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -420,7 +466,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
*/
if (unlikely(sq->mpwqe.wqe))
mlx5e_xdp_mpwqe_complete(sq);
- return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta);
}
if (!xdptxd->len) {
skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
@@ -450,6 +496,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
* and it's safe to complete it at any time.
*/
mlx5e_xdp_mpwqe_session_start(sq);
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth);
}
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
@@ -480,7 +527,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_xmit_data_frags *xdptxdf =
container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
@@ -493,6 +540,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
u16 ds_cnt, inline_hdr_sz;
+ unsigned int frags_size;
u8 num_wqebbs = 1;
int num_frags = 0;
bool inline_ok;
@@ -503,8 +551,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
dma_len >= MLX5E_XDP_MIN_INLINE;
+ frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
- if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
+ if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
stats->err++;
return false;
}
@@ -599,6 +648,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
sq->pc++;
}
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
+
sq->doorbell_cseg = cseg;
stats->xmit++;
@@ -608,7 +659,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- struct xdp_frame_bulk *bq)
+ struct xdp_frame_bulk *bq,
+ struct mlx5e_cq *cq,
+ struct mlx5_cqe64 *cqe)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -668,10 +721,24 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
}
- case MLX5E_XDP_XMIT_MODE_XSK:
+ case MLX5E_XDP_XMIT_MODE_XSK: {
/* AF_XDP send */
+ struct xsk_tx_metadata_compl *compl = NULL;
+ struct mlx5e_xsk_tx_complete priv = {
+ .cqe = cqe,
+ .cq = cq,
+ };
+
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ compl = &xdpi.xsk_meta;
+
+ xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);
+ }
+
(*xsk_frames)++;
break;
+ }
default:
WARN_ON_ONCE(true);
}
@@ -720,7 +787,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -767,7 +834,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL);
}
xdp_flush_frame_bulk(&bq);
@@ -840,7 +907,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL);
if (unlikely(!ret)) {
int j;
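The AF_XDP TX metadata support is a two-phase protocol: at send time, xsk_tx_metadata_request() lets the socket stamp its requests (checksum offload, completion timestamp) into the descriptor being built; at completion time, xsk_tx_metadata_complete() resolves them against the CQE. Condensed from the hunks above:

	/* TX: stamp requests into the eth segment of the WQE being built */
	xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);

	/* CQE: hand the ops the context they need to answer, e.g. the
	 * CQE timestamp converted through the device clock */
	struct mlx5e_xsk_tx_complete priv = { .cqe = cqe, .cq = cq };
	xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);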
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ecfe93a479da..e054db1e10f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -33,6 +33,7 @@
#define __MLX5_EN_XDP_H__
#include <linux/indirect_call_wrapper.h>
+#include <net/xdp_sock.h>
#include "en.h"
#include "en/txrx.h"
@@ -82,7 +83,7 @@ enum mlx5e_xdp_xmit_mode {
* num, page_1, page_2, ... , page_num.
*
* MLX5E_XDP_XMIT_MODE_XSK:
- * none.
+ * frame.xsk_meta.
*/
#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
@@ -97,6 +98,7 @@ union mlx5e_xdp_info {
u8 num;
struct page *page;
} page;
+ struct xsk_tx_metadata_compl xsk_meta;
};
struct mlx5e_xsk_param;
@@ -112,13 +114,16 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+extern const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops;
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 36826b582484..82e6abbc1734 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -127,7 +127,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
@@ -136,7 +136,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 597f319d4770..a59199ed590d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -55,12 +55,16 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool))
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .xsk_meta = {} });
sq->doorbell_cseg = &nopwqe->ctrl;
}
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct xsk_tx_metadata *meta = NULL;
union mlx5e_xdp_info xdpi;
bool work_done = true;
bool flush = false;
@@ -93,12 +97,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
+ meta = xsk_buff_get_metadata(pool, desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd,
- check_result);
+ check_result, meta);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
@@ -106,6 +111,16 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xsk_tx_post_err(sq, &xdpi);
} else {
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ struct xsk_tx_metadata_compl compl;
+
+ xsk_tx_metadata_to_compl(meta, &compl);
+ XSK_TX_COMPL_FITS(void *);
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .xsk_meta = compl });
+ }
}
flush = true;
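
[Not part of the patch: a minimal sketch of the invariant the XSK metadata hunks above, together with the completion-side hunk in xdp.c, maintain. With TX metadata enabled, every transmitted descriptor pushes exactly one extra xdpi_fifo entry holding its completion cookie, and the CQE handler pops it in the same order. The helper names xsk_meta_push()/xsk_meta_pop() are illustrative only:]

static void xsk_meta_push(struct mlx5e_xdpsq *sq, struct xsk_tx_metadata *meta)
{
        struct xsk_tx_metadata_compl compl;

        if (!xp_tx_metadata_enabled(sq->xsk_pool))
                return;

        xsk_tx_metadata_to_compl(meta, &compl);
        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                             (union mlx5e_xdp_info) { .xsk_meta = compl });
}

static void xsk_meta_pop(struct mlx5e_xdpsq *sq,
                         struct mlx5e_xsk_tx_complete *priv)
{
        union mlx5e_xdp_info xdpi;

        if (!xp_tx_metadata_enabled(sq->xsk_pool))
                return;

        xdpi = mlx5e_xdpi_fifo_pop(&sq->db.xdpi_fifo);
        xsk_tx_metadata_complete(&xdpi.xsk_meta,
                                 &mlx5e_xsk_tx_metadata_ops, priv);
}

[The error path (mlx5e_xsk_tx_post_err above) pushes an empty .xsk_meta entry for the same reason: the completion side pops unconditionally whenever metadata is enabled, so push and pop counts must stay symmetric.]
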
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 655496598c68..c54fd01ea635 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
- sa_entry->esn_state.esn = esn;
+ if (sa_entry->esn_state.esn_msb)
+ sa_entry->esn_state.esn = esn;
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
@@ -329,15 +336,41 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
+ attrs->dir = x->xso.dir;
+
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ goto skip_replay_window;
+
+ switch (x->replay_esn->replay_window) {
+ case 32:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ case 64:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 128:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 256:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ default:
+ WARN_ON(true);
+ return;
+ }
}
- attrs->dir = x->xso.dir;
+skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
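
[Not part of the patch: the replay-window handling added above, restated as a standalone mapping; the helper name is hypothetical. Only the four window sizes the ASO hardware supports are accepted, and mlx5e_xfrm_validate_state() in the next hunk now rejects other RX window sizes before this code runs, which is why the default arm is a WARN rather than a graceful error:]

static int replay_window_to_aso(u32 replay_window, u8 *win)
{
        switch (replay_window) {
        case 32:
                *win = MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
                return 0;
        case 64:
                *win = MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
                return 0;
        case 128:
                *win = MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
                return 0;
        case 256:
                *win = MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
                return 0;
        default:
                return -EINVAL;
        }
}

[Note the enum value now goes into the ASO window_sz field as-is; see the ipsec_offload.c hunk further down, which drops the old "replay_window / 64" conversion.]
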
@@ -473,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+ x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
@@ -907,9 +941,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ if (ipsec->netevent_nb.notifier_call) {
unregister_netevent_notifier(&ipsec->netevent_nb);
- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ ipsec->netevent_nb.notifier_call = NULL;
+ }
+ if (ipsec->aso)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
@@ -948,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
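
[Not part of the patch: the renamed callback above folds the new per-SA hardware drop counters into the xfrm state statistics. In summary, for an RX SA (auth/trailer are read for any RX SA; the lifetime and replay counters only under packet offload):]

        /*
         *   auth.fc    -> x->stats.integrity_failed and XFRMINSTATEPROTOERROR
         *   trailer.fc -> XFRMINHDRERROR
         *   fc         -> x->curlft.packets / x->curlft.bytes
         *   replay.fc  -> x->stats.replay and XFRMINSTATESEQERROR
         */
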
@@ -1018,6 +1074,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
}
}
+ if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1113,16 +1175,8 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-};
-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
- .xdo_dev_state_add = mlx5e_xfrm_add_state,
- .xdo_dev_state_delete = mlx5e_xfrm_del_state,
- .xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
- .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
@@ -1138,11 +1192,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
- netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
- else
- netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
-
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8f4a37bceaf4..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
@@ -189,11 +188,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
+struct mlx5e_ipsec_drop {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
+};
+
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
+ struct mlx5e_ipsec_drop replay;
+ struct mlx5e_ipsec_drop auth;
+ struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
@@ -201,19 +208,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
- struct xarray ipsec_obj_id_map;
-};
-
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@@ -248,6 +242,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f41c976dc33f..41a2543a52cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *drop_all_group;
+ struct mlx5e_ipsec_drop all;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@@ -128,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drops.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+ mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+}
+
+static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ mlx5_del_flow_rules(rx->status.rule);
+
+ if (rx != ipsec->rx_esw)
+ return;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+#endif
+}
+
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_flow_handle *fte;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
int err;
@@ -143,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
if (!spec)
return -ENOMEM;
- /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
- MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
- MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
- MLX5_SET(copy_action_in, action, src_offset, 0);
- MLX5_SET(copy_action_in, action, length, 7);
- MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 24);
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+ sa_entry->ipsec_rule.auth.fc = flow_counter;
- modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_err(mdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
- goto out_spec;
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
}
+ sa_entry->ipsec_rule.auth.rule = rule;
- /* create fte */
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt_2;
+ }
+ sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule_2;
+ }
+ sa_entry->ipsec_rule.trailer.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_rule_2:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ sa_entry->ipsec_rule.replay.rule = rule;
+ sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drops.drop_all_group = g;
+ rx->status_drops.all.rule = rule;
+ rx->status_drops.all.fc = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
- goto out;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
}
+ rx->status.rule = rule;
kvfree(spec);
- rx->status.rule = fte;
- rx->status.modify_hdr = modify_hdr;
return 0;
-out:
- mlx5_modify_header_dealloc(mdev, modify_hdr);
-out_spec:
+err_rule:
kvfree(spec);
return err;
}
+static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ ipsec_rx_status_pass_destroy(ipsec, rx);
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = ipsec_rx_status_drop_all_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
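
[Not part of the patch: a rough picture of the resulting RX "status" table; rx_create() below now sizes it for three groups. The per-SA drop rules and the shared pass rule land in auto-created groups, while the drop-all group is created explicitly with its start and end flow index pinned to ft->max_fte - 1, so it always sits behind every other rule and counts whatever falls through:]

        /*
         *   per-SA drops: ipsec_syndrome == 1/2 + reg_c_2 == SA handle
         *                 -> drop + count (bad auth / bad trailer);
         *                 reg_c_4 == 1 + reg_c_2 == SA handle
         *                 -> drop + count (replay)
         *   pass rule:    ipsec_syndrome == 0 && reg_c_4 == 0
         *                 -> forward to the policy tables
         *   last FTE:     catch-all -> drop + count
         */
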
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
@@ -333,12 +597,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- if (rx == ipsec->rx_esw) {
- mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
- } else {
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
- }
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -419,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -428,10 +687,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- if (rx == ipsec->rx_esw)
- err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
- else
- err = ipsec_status_rule(mdev, rx, dest);
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -956,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}
-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
/* SPI number */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+ if (encap) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.inner_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.inner_esp_spi, spi);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.outer_esp_spi, spi);
+ }
}
static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@@ -1052,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
+ u8 num_of_actions = 1;
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+ MLX5_SET(set_action_in, action[1], data, val);
+ MLX5_SET(set_action_in, action[1], offset, 0);
+ MLX5_SET(set_action_in, action[1], length, 32);
+
+ if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[2], action_type,
+ MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[2], field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+ MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], offset, 0);
+ MLX5_SET(set_action_in, action[2], length, 32);
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
- MLX5_SET(set_action_in, action, data, val);
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action[0], data, val);
+ MLX5_SET(set_action_in, action[0], offset, 0);
+ MLX5_SET(set_action_in, action[0], length, 32);
- modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
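
[Not part of the patch, and an inference rather than something the diff states: the two extra RX set-actions give packets a steering-visible identity for the new status checks. reg_c_2 is loaded with the same SA handle as reg_b so the per-SA auth/trailer/replay drop rules can match on it, and reg_c_4 is explicitly zeroed for crypto-mode SAs, apparently because no ASO replay check writes it in that mode and the status "pass" rule requires reg_c_4 == 0.]
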
@@ -1321,8 +1605,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
else
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- setup_fte_spi(spec, attrs->spi);
- setup_fte_esp(spec);
+ setup_fte_spi(spec, attrs->spi, attrs->encap);
+ if (!attrs->encap)
+ setup_fte_esp(spec);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1372,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ err = rx_add_rule_drop_replay(sa_entry, rx);
+ if (err)
+ goto err_add_replay;
+
+ err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+ if (err)
+ goto err_drop_reason;
+
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@@ -1380,6 +1674,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
+err_drop_reason:
+ if (sa_entry->ipsec_rule.replay.rule) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+ }
+err_add_replay:
+ mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
@@ -1428,7 +1729,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- setup_fte_spi(spec, attrs->spi);
+ setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
@@ -1809,8 +2110,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int err = 0;
- if (esw)
- down_write(&esw->mode_lock);
+ if (esw) {
+ err = mlx5_esw_lock(esw);
+ if (err)
+ return err;
+ }
if (mdev->num_block_ipsec) {
err = -EBUSY;
@@ -1821,7 +2125,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
unlock:
if (esw)
- up_write(&esw->mode_lock);
+ mlx5_esw_unlock(esw);
return err;
}
@@ -1838,7 +2142,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
- mdev->num_block_tc++;
+ mdev->num_block_tc--;
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -1887,6 +2191,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+ mlx5_del_flow_rules(ipsec_rule->auth.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+ if (ipsec_rule->replay.rule) {
+ mlx5_del_flow_rules(ipsec_rule->replay.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+ }
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1957,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
- xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+ xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -2020,7 +2335,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
- xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a91f772dc981..6e00afe4671b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -6,6 +6,8 @@
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
+#include "fs_core.h"
+#include "eswitch.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -38,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
- if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
+ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
+ is_mdev_legacy_mode(mdev)))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -95,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
- attrs->replay_esn.replay_window / 64);
+ attrs->replay_esn.replay_window);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
@@ -559,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
+ ipsec->aso = NULL;
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ drop:
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2ed99772f168..82064614846f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd8743114..b2cabd6ab86c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
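
[Not part of the patch: the net effect of the MACsec changes above is a lifetime split. The hardware MACsec object (and its encryption key) now lives for as long as the SA exists, while the flow-steering rule is created and torn down as the SA toggles active, via the new mlx5e_macsec_init_sa_fs()/mlx5e_macsec_cleanup_sa_fs() helpers; update paths (upd_txsa, upd_secy, hw-address changes) therefore no longer destroy and recreate the whole object just to flip a rule.]
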
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index bb7f86c993e5..c7f542d0b8f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE
@@ -254,11 +255,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in || !ft->g) {
- kfree(ft->g);
- kvfree(in);
+ if (!ft->g)
return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +281,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
switch (type) {
@@ -300,7 +303,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +312,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@@ -318,18 +321,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
-err:
+err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
kvfree(in);
-
+err_free_g:
+ kfree(ft->g);
+ ft->g = NULL;
return err;
}
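
[Not part of the patch: a sketch of the unwind shape the hunk above converges on. One label per resource, in reverse acquisition order, so each failure frees exactly what was allocated, and ft->g is reset because callers may inspect it after failure. All names here are illustrative:]

static int example_create_groups(struct example_ft *ft)
{
        u32 *in;
        int err;

        ft->g = kcalloc(EXAMPLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;

        in = kvzalloc(EXAMPLE_INLEN, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free_g;
        }

        err = example_populate_groups(ft, in);
        if (err)
                goto err_free_in;

        kvfree(in);
        return 0;

err_free_in:
        kvfree(in);
err_free_g:
        kfree(ft->g);
        ft->g = NULL;   /* don't leave a dangling pointer for callers */
        return err;
}

[The original code allocated both buffers before checking either, so a failed kvzalloc() freed ft->g but left it non-NULL, which later teardown could free again; splitting the checks removes that hazard.]
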
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 41c396e76457..6ed3a32b7e22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -74,7 +74,73 @@ int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
return err;
}
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
+{
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+
+ if (mlx5_lag_is_lacp_owner(mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
+{
+ mlx5_core_destroy_tis(mdev, tisn);
+}
+
+static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+}
+
+static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
+}
+
+static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+ int err;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, prio, tc << 1);
+
+ if (mlx5_lag_should_assign_affinity(mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(mdev, in, &tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
+ }
+
+ return 0;
+
+err_close_tises:
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+ tc = MLX5_MAX_NUM_TC;
+ }
+
+ return err;
+}
+
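
[Not part of the patch: the unwind loop in err_close_tises above is easy to misread. A worked trace, assuming the create loop failed at lag port i == 1, tc == 2:]

        /*
         *   pass i == 1: "for (tc--; ...)" destroys tisn[1][1], tisn[1][0]
         *                (tisn[1][2] was never created), then tc is reset
         *                to MLX5_MAX_NUM_TC
         *   pass i == 0: destroys tisn[0][MLX5_MAX_NUM_TC - 1] .. tisn[0][0]
         */
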
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@@ -103,6 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
+ if (create_tises) {
+ err = mlx5e_create_tises(mdev, res->tisn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+ goto err_destroy_bfreg;
+ }
+ res->tisn_valid = true;
+ }
+
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@@ -115,6 +190,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
return 0;
+err_destroy_bfreg:
+ mlx5_free_bfreg(mdev, &res->bfreg);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -130,6 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
+ if (res->tisn_valid)
+ mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 215261a69255..cc51ce16df14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ int count;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%04d (%.16s)",
- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
- mdev->board_id);
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+ if (count >= sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
+
strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
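
[Not part of the patch: a sketch of the truncation fallback adopted above. It relies only on standard snprintf() semantics, namely that the return value is the length that would have been written, so a result >= the buffer size signals truncation; in that case the board id is dropped and the bare firmware version is printed instead. Variable names are illustrative:]

        char ver[32];
        int n;

        n = snprintf(ver, sizeof(ver), "%d.%d.%04d (%.16s)",
                     maj, min, sub, board_id);
        if (n >= sizeof(ver))   /* board id did not fit */
                snprintf(ver, sizeof(ver), "%d.%d.%04d", maj, min, sub);
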
@@ -1257,27 +1262,29 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
+ err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
+ rxfh->indir, rxfh->key, &rxfh->hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete)
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 *rss_context = &rxfh->rss_context;
+ u8 hfunc = rxfh->hfunc;
int err;
mutex_lock(&priv->state_lock);
- if (delete) {
+ if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
@@ -1290,7 +1297,8 @@ static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
goto unlock;
}
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+ rxfh->indir, rxfh->key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
unlock:
@@ -1298,25 +1306,6 @@ unlock:
return err;
}
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int err;
-
- mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
- mutex_unlock(&priv->state_lock);
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
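
[Not part of the patch: the RSS hunks above migrate to the consolidated ethtool API, where a single get/set pair receives struct ethtool_rxfh_param (indir, key and hfunc plus rss_context and rss_delete) and the driver opts in to context handling with cap_rss_ctx_supported, making the separate *_rxfh_context callbacks unnecessary. A minimal sketch of a driver on the new interface; the foo_* names and the foo_rss_read() helper are hypothetical:]

static int foo_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
        /* rxfh->rss_context == 0 addresses the default RSS context */
        return foo_rss_read(netdev_priv(dev), rxfh->rss_context,
                            rxfh->indir, rxfh->key, &rxfh->hfunc);
}

static const struct ethtool_ops foo_ethtool_ops = {
        .cap_rss_ctx_supported = true,
        .get_rxfh = foo_get_rxfh,
        /* .set_rxfh covers create/update/delete via rxfh->rss_context
         * and rxfh->rss_delete, as mlx5e_set_rxfh() does above.
         */
};
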
@@ -2393,6 +2382,7 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
@@ -2415,8 +2405,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
- .get_rxfh_context = mlx5e_get_rxfh_context,
- .set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea58c6917433..be809556b2e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -902,6 +902,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.nid = node;
pp_params.dev = rq->pdev;
pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
@@ -1351,6 +1352,17 @@ void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_free_rq(rq);
}
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc)
+{
+ if (profile->get_tisn)
+ return profile->get_tisn(mdev, priv, lag_port, tc);
+
+ return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc];
+}
+
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
kvfree(sq->db.xdpi_fifo.xi);
@@ -1794,6 +1806,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
@@ -1807,6 +1820,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
@@ -1919,7 +1933,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
+ csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0); /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1981,11 +1996,12 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
-static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ struct workqueue_struct *workqueue,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int err;
u32 i;
@@ -2012,13 +2028,13 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
}
cq->mdev = mdev;
- cq->netdev = priv->netdev;
- cq->priv = priv;
+ cq->netdev = netdev;
+ cq->workqueue = workqueue;
return 0;
}
-static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
@@ -2029,7 +2045,7 @@ static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(priv, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2095,14 +2111,13 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5e_alloc_cq(priv, param, ccp, cq);
+ err = mlx5e_alloc_cq(mdev, param, ccp, cq);
if (err)
return err;
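
[Not part of the patch: with this signature change a CQ no longer reaches back into a full mlx5e_priv. The device comes in as mdev, and the netdev and completion workqueue travel through mlx5e_create_cq_param (ccp->netdev / ccp->wq above), which is what lets the drop-RQ path below and the XSK setup.c hunks earlier pass their own context explicitly.]
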
@@ -2135,7 +2150,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
ccp, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
@@ -2203,12 +2218,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
u32 qos_queue_group_id;
+ u32 tisn;
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
if (err)
goto err_close_sqs;
- err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
+ err = mlx5e_open_txqsq(c, tisn, txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix]->sq[tc]);
@@ -2336,12 +2354,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
@@ -2350,17 +2368,17 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
goto err_close_xdp_tx_cqs;
- err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
+ err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
@@ -2544,6 +2562,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2586,12 +2605,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
+
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
+
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
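
[Not part of the patch: the pairing the two hunks above establish. A queue advertises its NAPI instance while the channel is active and clears it on deactivate, so netdev queue introspection over netlink never observes a stale pointer. Folded into one hypothetical helper:]

static void mlx5e_chan_queue_napi(struct mlx5e_channel *c, bool active)
{
        netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX,
                             active ? &c->napi : NULL);
}

[The TX side gets the same treatment in mlx5e_activate_txqsq()/mlx5e_deactivate_txqsq() earlier in this file.]
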
@@ -2731,6 +2754,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
+ ASSERT_RTNL();
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
@@ -3012,17 +3036,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
+ set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
+static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
+{
+ WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
+ if (current_work() != &priv->tx_timeout_work)
+ cancel_work_sync(&priv->tx_timeout_work);
+}
+
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
+ clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+ mlx5e_cancel_tx_timeout_work(priv);
+
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
@@ -3294,7 +3330,7 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3350,75 +3386,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
mlx5e_free_cq(&drop_rq->cq);
}
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
-{
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
-
- if (MLX5_GET(tisc, tisc, tls_en))
- MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
-
- if (mlx5_lag_is_lacp_owner(mdev))
- MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
-
- return mlx5_core_create_tis(mdev, in, tisn);
-}
-
-void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
-{
- mlx5_core_destroy_tis(mdev, tisn);
-}
-
-void mlx5e_destroy_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
-}
-
-static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
-}
-
-int mlx5e_create_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
- int err;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
-
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, prio, tc << 1);
-
- if (mlx5e_lag_should_assign_affinity(priv->mdev))
- MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
-
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
- if (err)
- goto err_close_tises;
- }
- }
-
- return 0;
-
-err_close_tises:
- for (; i >= 0; i--) {
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
- tc = priv->profile->max_tc;
- }
-
- return err;
-}
-
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
if (priv->mqprio_rl) {
@@ -3427,7 +3394,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
priv->mqprio_rl = NULL;
}
mlx5e_accel_cleanup_tx(priv);
- mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
@@ -3529,7 +3495,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- if (tc && tc != MLX5E_MAX_NUM_TC)
+ if (tc && tc != MLX5_MAX_NUM_TC)
return -EINVAL;
new_params = priv->channels.params;
@@ -4801,8 +4767,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- rtnl_lock();
- mutex_lock(&priv->state_lock);
+ /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
+ * through this flow. However, channel closing flows have to wait for
+ * this work to finish while holding rtnl lock too. So either get the
+ * lock or find that channels are being closed for another reason and
+ * this work is not relevant anymore.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+ return;
+ msleep(20);
+ }
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
@@ -4821,7 +4796,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
}
unlock:
- mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
@@ -5164,6 +5138,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
@@ -5244,7 +5219,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
@@ -5484,23 +5458,13 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_accel_init_tx(priv);
if (err)
- goto err_destroy_tises;
+ return err;
mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
-
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
- return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5595,7 +5559,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
@@ -6035,7 +5999,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;
@@ -6048,7 +6012,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+static int _mlx5e_suspend(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6066,15 +6030,18 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
return 0;
}
-static int mlx5e_probe(struct auxiliary_device *adev,
- const struct auxiliary_device_id *id)
+static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return _mlx5e_suspend(adev);
+}
+
+static int _mlx5e_probe(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
- pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
@@ -6129,7 +6096,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
return 0;
err_resume:
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6141,16 +6108,21 @@ err_devlink_unregister:
return err;
}
+static int mlx5e_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ return _mlx5e_probe(adev);
+}
+
static void mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
- pm_message_t state = {};
mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 693e55b010d9..05527418fa64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ int count;
strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%04d (%.16s)",
- fw_rev_maj(mdev), fw_rev_min(mdev),
- fw_rev_sub(mdev), mdev->board_id);
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
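+ /* Retry without the board id if the full version string was truncated. */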
+ if (count >= sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
}
static const struct counter_desc sw_rep_stats_desc[] = {
@@ -108,8 +112,18 @@ static const struct counter_desc vport_rep_stats_desc[] = {
tx_vport_rdma_multicast_bytes) },
};
+static const struct counter_desc vport_rep_loopback_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_bytes) },
+};
+
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
+#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
+ ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
@@ -153,7 +167,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
- return NUM_VPORT_REP_HW_COUNTERS;
+ return NUM_VPORT_REP_HW_COUNTERS +
+ NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
@@ -162,6 +177,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_rep_loopback_stats_desc[i].format);
return idx;
}
@@ -172,6 +190,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i);
return idx;
}
@@ -243,6 +264,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);
+ if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) {
+ rep_stats->vport_loopback_packets =
+ MLX5_GET_CTR(out, local_loopback.packets);
+ rep_stats->vport_loopback_bytes =
+ MLX5_GET_CTR(out, local_loopback.octets);
+ }
+
out:
kvfree(out);
}
@@ -1152,12 +1180,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_rep_neigh_init(rpriv);
if (err)
goto err_neigh_init;
@@ -1180,7 +1202,6 @@ err_ht_init:
err_init_tx:
mlx5e_rep_neigh_cleanup(rpriv);
err_neigh_init:
- mlx5e_destroy_tises(priv);
return err;
}
@@ -1194,7 +1215,6 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
mlx5e_cleanup_uplink_rep_tx(rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
- mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1424,7 +1444,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
@@ -1493,7 +1513,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
rpriv->rep->vport);
- if (dl_port) {
+ if (!IS_ERR(dl_port)) {
SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
mlx5e_rep_vnic_reporter_create(priv, dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8d9743a5e42c..d601b5faaed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -298,8 +298,8 @@ static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
struct page *page = frag_page->page;
- if (page_pool_defrag_page(page, drain_count) == 0)
- page_pool_put_defragged_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_page(page, drain_count) == 0)
+ page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -1039,7 +1039,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 477c547dcc04..12b3607afecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -476,6 +476,8 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_packets;
u64 rx_vport_rdma_multicast_bytes;
u64 tx_vport_rdma_multicast_bytes;
+ u64 vport_loopback_packets;
+ u64 vport_loopback_bytes;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9a5a5c2c7da9..9fb2c057bd78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@@ -758,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, priv->max_nch));
+ mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@@ -2011,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
+
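+ /* Always unlink the peer flow; only the last reference frees it. */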
+ list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
- list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}
@@ -3147,7 +3151,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
+ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3158,21 +3162,31 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
-static unsigned long mask_to_le(unsigned long mask, int size)
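+/* Read a pedit field's mask in CPU byte order. */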
+static u32 mask_field_get(void *mask, struct mlx5_fields *f)
{
- __be32 mask_be32;
- __be16 mask_be16;
-
- if (size == 32) {
- mask_be32 = (__force __be32)(mask);
- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (size == 16) {
- mask_be32 = (__force __be32)(mask);
- mask_be16 = *(__be16 *)&mask_be32;
- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ switch (f->field_bsize) {
+ case 32:
+ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
+ case 16:
+ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
+ default:
+ return *(u8 *)mask & (u8)f->field_mask;
}
+}
- return mask;
+static void mask_field_clear(void *mask, struct mlx5_fields *f)
+{
+ switch (f->field_bsize) {
+ case 32:
+ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
+ break;
+ case 16:
+ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
+ break;
+ default:
+ *(u8 *)mask &= ~(u8)f->field_mask;
+ break;
+ }
}
static int offload_pedit_fields(struct mlx5e_priv *priv,
@@ -3184,35 +3198,32 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
- unsigned long mask, field_mask;
+ void *s_masks_p, *a_masks_p;
int i, first, last, next_z;
struct mlx5_fields *f;
+ unsigned long mask;
+ u32 s_mask, a_mask;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
- set_masks = &hdrs[0].masks;
- add_masks = &hdrs[1].masks;
- set_vals = &hdrs[0].vals;
- add_vals = &hdrs[1].vals;
+ set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
+ add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
+ set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
+ add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
f = &fields[i];
- /* avoid seeing bits set from previous iterations */
- s_mask = 0;
- a_mask = 0;
-
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
- s_mask = *s_masks_p & f->field_mask;
- a_mask = *a_masks_p & f->field_mask;
+ s_mask = mask_field_get(s_masks_p, f);
+ a_mask = mask_field_get(a_masks_p, f);
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@@ -3239,22 +3250,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
- *s_masks_p &= ~f->field_mask;
+ mask_field_clear(s_masks_p, f);
} else {
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
- if ((*(u32 *)vals_p & f->field_mask) == 0)
+ if (!mask_field_get(vals_p, f))
skip = true;
/* clear to denote we consumed this field */
- *a_masks_p &= ~f->field_mask;
+ mask_field_clear(a_masks_p, f);
}
if (skip)
continue;
- mask = mask_to_le(mask, f->field_bsize);
-
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
last = find_last_bit(&mask, f->field_bsize);
@@ -3281,10 +3290,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
+ unsigned long field_mask = f->field_mask;
int start;
- field_mask = mask_to_le(f->field_mask, f->field_bsize);
-
/* if field is bit sized it can start not from first bit */
start = find_first_bit(&field_mask, f->field_bsize);
@@ -3735,6 +3743,20 @@ out_free:
}
static int
+set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+
+ return 0;
+}
+
+static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
struct mlx5_flow_attr **cond_attr,
@@ -3757,8 +3779,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ err = set_branch_dest_ft(flow->priv, attr);
+ if (err)
+ goto out_err;
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
@@ -3767,8 +3790,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ err = set_branch_dest_ft(flow->priv, attr);
+ if (err)
+ goto out_err;
break;
default:
err = -EOPNOTSUPP;
@@ -5007,22 +5031,6 @@ int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
return apply_police_params(priv, 0, extack);
}
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct rtnl_link_stats64 cur_stats;
- u64 dbytes;
- u64 dpkts;
-
- mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
- dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
- dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
- rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
- FLOW_ACTION_HW_STATS_DELAYED);
-}
-
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
@@ -5715,8 +5723,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
- if (IS_ERR(attr->act_id_restore_rule))
+ if (IS_ERR(attr->act_id_restore_rule)) {
+ err = PTR_ERR(attr->act_id_restore_rule);
goto err_rule;
+ }
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index adb39e30f90f..c24bda56b2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -203,8 +203,6 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index d41435c22ce5..2fa076b23fbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -399,9 +399,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
mlx5e_skb_cb_hwtstamp_init(skb);
- mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
+ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
netif_tx_stop_queue(sq->txq);
@@ -494,10 +496,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- dev_kfree_skb_any(skb);
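+ /* skb_shinfo(skb) is dereferenced below; free the skb only afterwards. */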
if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
be32_to_cpu(eseg->flow_table_metadata));
+ dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -861,7 +863,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
}
stats->cqe_err++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea0405e0a43f..40a6cb052a2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
+ int cpu;
irq = xa_load(&table->comp_irqs, vecidx);
if (!irq)
return;
+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+ cpumask_clear_cpu(cpu, &table->used_cpus);
xa_erase(&table->comp_irqs, vecidx);
mlx5_irq_affinity_irq_release(dev, irq);
}
@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
- irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
- if (IS_ERR(irq)) {
- /* In case SF irq pool does not exist, fallback to the PF irqs*/
- if (PTR_ERR(irq) == -ENOENT)
- return comp_irq_request_pci(dev, vecidx);
+ /* In case the SF IRQ pool does not exist, fall back to the PF IRQs */
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return comp_irq_request_pci(dev, vecidx);
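+ /* Request a managed IRQ affined to CPUs not yet used by other SF vectors. */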
+ af_desc.is_managed = 1;
+ cpumask_copy(&af_desc.mask, cpu_online_mask);
+ cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
+ if (IS_ERR(irq))
return PTR_ERR(irq);
- }
+
+ cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index a7ed87e9d842..22dd30cf8033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 095f31f380fa..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
-}
-
-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-}
-
-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = rx->ft.status;
- struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_fc *flow_counter;
- struct mlx5_flow_spec *spec;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- int err = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!flow_group_in || !spec) {
- err = -ENOMEM;
- goto err_out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- g = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop flow group, err=%d\n", err);
- goto err_out;
- }
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
- goto err_cnt;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
-
- kvfree(flow_group_in);
- kvfree(spec);
- return 0;
-
-err_rule:
- mlx5_fc_destroy(mdev, flow_counter);
-err_cnt:
- mlx5_destroy_flow_group(g);
-err_out:
- kvfree(flow_group_in);
- kvfree(spec);
- return err;
-}
-
-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_2.ipsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.ipsec_syndrome, 0);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(ipsec->mdev,
- "Failed to add ipsec rx status pass rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status.rule = rule;
- kvfree(spec);
- return 0;
-
-err_rule:
- kvfree(spec);
- return err;
-}
-
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- esw_ipsec_rx_status_pass_destroy(ipsec, rx);
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-}
-
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- int err;
-
- err = esw_ipsec_rx_status_drop_create(ipsec, rx);
- if (err)
- return err;
-
- err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
- if (err)
- goto err_pass_create;
-
- return 0;
-
-err_pass_create:
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
- return err;
-}
-
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
@@ -202,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
- err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@@ -233,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@@ -242,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@@ -252,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
- val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 0c90f7a8b0d3..ac9c65b89166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx);
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx) {}
-
-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- return -EINVAL;
-}
-
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8d0b915a3121..3047d7015c52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
- lockdep_assert_held(&esw->mode_lock);
+ devl_assert_locked(priv_to_devlink(esw->dev));
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
}
}
- up_write(&esw->mode_lock);
-
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
return;
devl_assert_locked(priv_to_devlink(esw->dev));
- down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
- goto unlock;
+ return;
esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
esw->esw_funcs.num_vfs = 0;
else
esw->esw_funcs.num_ec_vfs = 0;
-
-unlock:
- up_write(&esw->mode_lock);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
- down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
esw->mode = MLX5_ESWITCH_LEGACY;
- up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -2254,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
if (!mlx5_esw_allowed(esw))
return true;
- if (down_read_trylock(&esw->mode_lock) != 0)
+ if (down_read_trylock(&esw->mode_lock) != 0) {
+ if (esw->eswitch_operation_in_progress) {
+ up_read(&esw->mode_lock);
+ return false;
+ }
return true;
+ }
return false;
}
@@ -2312,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
if (down_write_trylock(&esw->mode_lock) == 0)
return -EINVAL;
- if (atomic64_read(&esw->user_count) > 0) {
+ if (esw->eswitch_operation_in_progress ||
+ atomic64_read(&esw->user_count) > 0) {
up_write(&esw->mode_lock);
return -EBUSY;
}
@@ -2320,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
return esw->mode;
}
+int mlx5_esw_lock(struct mlx5_eswitch *esw)
+{
+ down_write(&esw->mode_lock);
+
+ if (esw->eswitch_operation_in_progress) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
* mlx5_esw_unlock() - Release write lock on esw mode lock
* @esw: eswitch device.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 37ab66e7b403..349e28a6dd8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -383,6 +383,7 @@ struct mlx5_eswitch {
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
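+ /* Set while mode_lock is dropped in the middle of a devlink operation. */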
+ bool eswitch_operation_in_progress;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -525,7 +526,8 @@ struct mlx5_esw_flow_attr {
u8 total_vlan;
struct {
u32 flags;
- struct mlx5_eswitch_rep *rep;
+ bool vport_valid;
+ u16 vport;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
@@ -616,13 +618,6 @@ static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}
-/* The returned number is valid only when the dev is eswitch manager. */
-static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
-{
- return mlx5_core_is_ecpf_esw_manager(dev) ?
- MLX5_VPORT_ECPF : MLX5_VPORT_PF;
-}
-
static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -827,6 +822,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
+int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b296ac52a439..baaae628b0a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
for (i = from; i < to; i++)
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5_chains_put_table(chains, 0, 1, 0);
- else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev))
- mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
- false);
+ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
}
static bool
@@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
* this criteria.
*/
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
- if (esw_attr->dests[i].rep &&
- mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ if (esw_attr->dests[i].vport_valid &&
+ mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev)) {
result = true;
} else {
@@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
- esw_attr->dests[j].rep->vport, false);
+ esw_attr->dests[j].vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
goto err_indir_tbl_get;
@@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
int attr_idx)
{
if (esw->offloads.ft_ipsec_tx_pol &&
- esw_attr->dests[attr_idx].rep &&
- esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+ esw_attr->dests[attr_idx].vport_valid &&
+ esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
/* To be aligned with software, encryption is needed only for tunnel device */
(esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
- esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+ esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
return true;
@@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
@@ -536,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* An uplink dest is external, but is considered internal
+ * if there is a reformat, because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -696,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
@@ -984,7 +988,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+ if (rep->vport == MLX5_VPORT_UPLINK &&
+ on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -1176,9 +1181,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
+ int err, pfindex;
unsigned long i;
void *misc;
- int err;
if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
@@ -1254,7 +1259,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[vport->index] = flow;
}
}
- esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
+
+ pfindex = mlx5_get_dev_index(peer_dev);
+ if (pfindex >= MLX5_MAX_PORTS) {
+ esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
+ pfindex, MLX5_MAX_PORTS);
+ err = -EINVAL;
+ goto add_ec_vf_flow_err;
+ }
+ esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
kvfree(spec);
return 0;
@@ -3650,18 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct net *devl_net, *netdev_net;
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_nocheck_get(devlink);
- netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
-
- return net_eq(devl_net, netdev_net);
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3706,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3732,13 +3726,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
- goto unlock;
+ goto skip;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
@@ -3748,6 +3745,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
}
+skip:
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
unlock:
mlx5_esw_unlock(esw);
enable_lag:
@@ -3758,16 +3758,12 @@ enable_lag:
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_mode_to_devlink(esw->mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_mode_to_devlink(esw->mode, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3861,11 +3857,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
- if (err)
- goto out;
+ if (!err)
+ esw->offloads.inline_mode = mlx5_mode;
- esw->offloads.inline_mode = mlx5_mode;
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
up_write(&esw->mode_lock);
return 0;
@@ -3877,16 +3877,12 @@ out:
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
- err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_read(&esw->mode_lock);
- return err;
+ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
@@ -3968,6 +3964,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
@@ -3981,6 +3980,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw);
}
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
+
unlock:
up_write(&esw->mode_lock);
return err;
@@ -3995,9 +3997,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_read(&esw->mode_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index edd910258314..40bdc677f051 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
/* hairpin */
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
- esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid &&
+ esw_attr->dests[i].vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a4b925331661..9b8599c200e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
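+ /* Propagate the uplink hairpin flag from the flow context into the FTE. */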
+ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
@@ -1144,3 +1146,37 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
return mlx5_fs_cmd_get_stub_cmds();
}
}
+
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
+
+ if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode);
+
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
+}
+
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect)
+{
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+
+ if (disconnect && !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type,
+ FS_FT_NIC_TX);
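+ /* op_mod 1 resets the root table to the default; otherwise ft_id becomes the new root. */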
+ if (disconnect)
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ else
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 7790ae5531e1..53e0e5137d3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -122,4 +122,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 4aed1768b85f..78eb6b7097e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -181,7 +181,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_handle {
int num_rules;
- struct mlx5_flow_rule *rule[];
+ struct mlx5_flow_rule *rule[] __counted_by(num_rules);
};
/* Type of children is mlx5_flow_group */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 17fe30a4c06c..0c26d707eed2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -539,7 +539,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[];
+ struct mlx5_fc fcs[] __counted_by(bulk_len);
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 58f4c0d0fafa..e7faf7e73ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b568988e92e3..2911aa34a5be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -325,6 +325,48 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *bridge = dev->pdev->bus->self;
+ u16 reg16;
+ int err;
+
+ if (!bridge)
+ return -EOPNOTSUPP;
+
+ err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
+ if (err)
+ return err;
+
+ if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
+ mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct pci_device_id mgt_ifc_device_ids[] = {
+ { PCI_VDEVICE(MELLANOX, 0xc2d2) }, /* BlueField1 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d3) }, /* BlueField2 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d4) }, /* BlueField3-Lx MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d5) }, /* BlueField3 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d6) }, /* BlueField4 MGT interface device ID */
+};
+
+static bool mlx5_is_mgt_ifc_pci_device(struct mlx5_core_dev *dev, u16 dev_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mgt_ifc_device_ids); ++i)
+ if (mgt_ifc_device_ids[i].device == dev_id)
+ return true;
+
+ return false;
+}
+
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -339,10 +381,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
if (err)
return pcibios_err_to_errno(err);
- if (sdev_id != dev_id) {
- mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
- return -EPERM;
- }
+
+ if (sdev_id == dev_id)
+ continue;
+
+ if (mlx5_is_mgt_ifc_pci_device(dev, sdev_id))
+ continue;
+
+ mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
+ return -EPERM;
}
return 0;
}
@@ -357,6 +404,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
return false;
}
+#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+#endif
+
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;
@@ -650,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -680,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -718,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
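
The fw_reset.c changes above follow one pattern: mlx5_fw_reset_init() now returns 0 without allocating anything when the MFRL register is unsupported, and every other entry point bails out early when dev->priv.fw_reset is NULL. A small userspace sketch of that capability-gated init, with illustrative names rather than the driver's API:

/* When the feature is absent the context stays NULL and every entry
 * point becomes a no-op; callers never need to know the difference.
 */
#include <stdbool.h>
#include <stdlib.h>

struct fw_reset_ctx { int flags; };
struct device { bool has_mfrl; struct fw_reset_ctx *fw_reset; };

static int fw_reset_init(struct device *dev)
{
	if (!dev->has_mfrl)
		return 0;			/* unsupported: skip allocation */
	dev->fw_reset = calloc(1, sizeof(*dev->fw_reset));
	return dev->fw_reset ? 0 : -12;		/* -ENOMEM */
}

static void fw_reset_events_start(struct device *dev)
{
	if (!dev->fw_reset)			/* guard: never initialized */
		return;
	dev->fw_reset->flags |= 1;
}

int main(void)
{
	struct device dev = { .has_mfrl = false };

	fw_reset_init(&dev);
	fw_reset_events_start(&dev);	/* safe no-op without the capability */
	return 0;
}
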
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc803..ad38e31822df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
@@ -246,13 +246,13 @@ recover_from_sw_reset:
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
- case MLX5_NIC_IFC_FULL:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
- case MLX5_NIC_IFC_DISABLED:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
- case MLX5_NIC_IFC_NO_DRAM_NIC:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
- case MLX5_NIC_IFC_SW_RESET:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded

* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
- * MLX5_NIC_IFC_DISABLED.
+ * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+};
+
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+};
+
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
+ const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
+ const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+ fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+ fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
- devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
+ fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
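
The health.c hunks split each reporter's ops table by function role: PFs keep the privileged .dump (and .recover for fw_fatal) callbacks, while VFs and SFs get a table without them, so devlink never offers operations the function cannot perform. A sketch of that selection, names illustrative:

/* Pick an ops table by role; the VF table simply omits the
 * privileged callback, and callers test before dereferencing.
 */
#include <stdbool.h>
#include <stdio.h>

struct reporter_ops {
	const char *name;
	void (*diagnose)(void);
	void (*dump)(void);	/* PF-only: needs privileged access */
};

static void diag(void) { puts("diagnose"); }
static void dump(void) { puts("dump"); }

static const struct reporter_ops pf_ops = { "fw", diag, dump };
static const struct reporter_ops vf_ops = { "fw", diag, NULL };

int main(void)
{
	bool is_pf = false;
	const struct reporter_ops *ops = is_pf ? &pf_ops : &vf_ops;

	ops->diagnose();
	if (ops->dump)		/* absent on VF/SF, never dereferenced */
		ops->dump();
	return 0;
}
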
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 2bf77a5251b4..d77be1b4dd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -339,7 +339,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -356,7 +356,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(priv->mdev, ipriv->tisn);
mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
@@ -483,6 +483,18 @@ static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5i_stats_grps);
}
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ if (WARN(lag_port || tc,
+ "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n",
+ lag_port, tc))
+ return 0;
+
+ return ipriv->tisn;
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -499,6 +511,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.max_tc = MLX5I_MAX_NUM_TC,
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
+ .get_tisn = mlx5i_get_tisn,
};
/* mlx5i netdev NDOs */
@@ -770,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}
@@ -829,7 +842,7 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
*params = (struct rdma_netdev_alloc_params){
.sizeof_priv = sizeof(struct mlx5i_priv) +
sizeof(struct mlx5e_priv),
- .txqs = nch * MLX5E_MAX_NUM_TC,
+ .txqs = nch * MLX5_MAX_NUM_TC,
.rxqs = nch,
.param = mdev,
.initialize_rdma_netdev = mlx5_rdma_setup_rn,
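
The ipoib.c hunks move the TIS number out of the shared priv->tisn[0][0] array into the IPoIB-private struct and expose it through the new .get_tisn profile hook, which rejects any lag_port or tc other than 0 since IPoIB owns exactly one TIS. A compact sketch of that accessor pattern, with hypothetical names:

/* Only (lag_port, tc) == (0, 0) is valid for IPoIB; anything else
 * is a caller bug, so warn and return a harmless zero.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ipoib_priv { uint32_t tisn; };

static uint32_t ipoib_get_tisn(const struct ipoib_priv *ipriv,
			       uint8_t lag_port, uint8_t tc)
{
	if (lag_port || tc) {
		fprintf(stderr, "unexpected lag_port %u / tc %u\n",
			lag_port, tc);
		return 0;
	}
	return ipriv->tisn;
}

int main(void)
{
	struct ipoib_priv ipriv = { .tisn = 42 };

	assert(ipoib_get_tisn(&ipriv, 0, 0) == 42);
	assert(ipoib_get_tisn(&ipriv, 1, 0) == 0);	/* rejected */
	return 0;
}
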
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index f3f2af972020..2ab6437a1c49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -53,6 +53,7 @@ extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
u32 qpn;
+ u32 tisn;
bool sub_interface;
u32 num_sub_interfaces;
u32 qkey;
@@ -63,6 +64,7 @@ struct mlx5i_priv {
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc);
/* Underlay QP create/destroy functions */
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 03e681297937..f87471306f6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -218,7 +218,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -240,7 +240,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_close_channels:
mlx5e_close_channels(&epriv->channels);
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
err_remove_rx_uderlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
@@ -269,7 +269,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
@@ -361,6 +361,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
+ .get_tisn = mlx5i_get_tisn,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 047d5fed5f89..612e666ec263 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
}
-
-/**
- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQ.
- * @used_cpus: cpumask of bounded cpus by the device
- * @vecidx: vector index to request an IRQ for.
- *
- * Each IRQ is bounded to at most 1 CPU.
- * This function is requesting an IRQ according to the default assignment.
- * The default assignment policy is:
- * - request the least loaded IRQ which is not bound to any
- * CPU of the previous IRQs requested.
- *
- * On success, this function updates used_cpus mask and returns an irq pointer.
- * In case of an error, an appropriate error pointer is returned.
- */
-struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
- struct cpumask *used_cpus, u16 vecidx)
-{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- struct irq_affinity_desc af_desc = {};
- struct mlx5_irq *irq;
-
- if (!mlx5_irq_pool_is_sf_pool(pool))
- return ERR_PTR(-ENOENT);
-
- af_desc.is_managed = 1;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
-
- if (IS_ERR(irq))
- return irq;
-
- cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
-
- return irq;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 40c7be124041..58bd749b5e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index aa29f09e8356..0361741632a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -266,9 +266,6 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
return -EINVAL;
@@ -286,12 +283,15 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_settime_real_time(mdev, ts);
- if (err)
- return err;
+
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_settime_real_time(mdev, ts);
+
+ if (err)
+ return err;
+ }
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
@@ -341,9 +341,6 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
/* HW time adjustment range is checked. If out of range, settime instead */
if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
struct timespec64 ts;
@@ -367,13 +364,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjtime_real_time(mdev, delta);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+
+ if (err)
+ return err;
+ }
+
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
@@ -384,22 +384,26 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
- return mlx5_ptp_adjtime(ptp, delta);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ return mlx5_ptp_adjtime_real_time(mdev, delta);
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
+ scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
+ /* mlx5 HW supports only a 32-bit scaled_ppm value */
MLX5_SET(mtutc_reg, in, freq_adj_units,
MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
- MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
} else {
MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
@@ -415,13 +419,15 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_core_dev *mdev;
unsigned long flags;
u32 mult;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+
+ if (err)
+ return err;
+ }
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -999,14 +1005,38 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
info->frac = timer->tc.frac;
}
+static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u8 log_max_freq_adjustment = 0;
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTUTC, 0, 0);
+ if (!err)
+ log_max_freq_adjustment =
+ MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
+
+ if (log_max_freq_adjustment)
+ clock->ptp_info.max_adj =
+ min(S32_MAX, 1 << log_max_freq_adjustment);
+}
+
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ if (MLX5_CAP_MCAM_REG(mdev, mtutc))
+ mlx5_init_timer_max_freq_adjustment(mdev);
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
- clock->ptp_info = mlx5_ptp_clock_info;
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
@@ -1037,11 +1067,10 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
}
seqlock_init(&clock->lock);
- mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
- /* Configure the PHC */
- clock->ptp_info = mlx5_ptp_clock_info;
+ /* Initialize the device clock */
+ mlx5_init_timer_clock(mdev);
/* Initialize 1PPS data structures */
mlx5_init_pps(mdev);
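
Taken together, the clock.c hunks move the mlx5_modify_mtutc_allowed() check into the callers (so the real-time helpers no longer silently succeed), program scaled_ppm in native units only when it fits in 32 bits with a ppb fallback otherwise, and derive max_adj from the device's log_max_freq_adjustment clamped to S32_MAX. A userspace sketch of the range gating and clamp, illustrative rather than the driver's code:

#include <stdint.h>
#include <stdio.h>

static int32_t clamp_max_adj(uint8_t log_max_freq_adjustment)
{
	int64_t max = 1LL << log_max_freq_adjustment;

	return max > INT32_MAX ? INT32_MAX : (int32_t)max;
}

static void program_freq_adj(long scaled_ppm, int hw_has_scaled_ppm)
{
	if (hw_has_scaled_ppm &&
	    scaled_ppm <= INT32_MAX && scaled_ppm >= INT32_MIN)
		printf("units=scaled_ppm value=%ld\n", (long)scaled_ppm);
	else
		/* scaled_ppm_to_ppb() in the driver; plain scaling here */
		printf("units=ppb value=%ld\n", scaled_ppm * 1000 / 65536);
}

int main(void)
{
	printf("max_adj=%d\n", (int)clamp_max_adj(31));	/* clamped to S32_MAX */
	program_freq_adj(123456, 1);
	return 0;
}
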
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e8e50563e956..e7d59cfa8708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -256,6 +256,13 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
devcom_free_comp_dev(devcom);
}
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
+{
+ struct mlx5_devcom_comp *comp = devcom->comp;
+
+ return kref_read(&comp->ref);
+}
+
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index fc23bbef87b4..ec32b686f586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -31,6 +31,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data);
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom);
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 9482e51ac82a..7c5516b0a844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -13,11 +13,13 @@ struct mlx5_dm {
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
+ unsigned long *header_encap_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
u64 header_modify_pattern_icm_blocks = 0;
+ u64 header_sw_encap_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
@@ -54,6 +56,17 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
goto err_modify_hdr;
}
+ if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
+ header_sw_encap_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_encap_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
+ if (!dm->header_encap_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
@@ -66,11 +79,14 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm->header_modify_pattern_sw_icm_alloc_blocks =
bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
- goto err_pattern;
+ goto err_sw_encap;
}
return dm;
+err_sw_encap:
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+
err_pattern:
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
@@ -105,6 +121,14 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
}
+ if (dm->header_encap_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+ }
+
if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev,
@@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_pattern_sw_icm_size);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
header_modify_pattern_sw_icm_start_address);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
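
The dm.c additions carve out a third SW ICM pool (MLX5_SW_ICM_TYPE_SW_ENCAP) using the same accounting as the existing pools: the device reports a log2 region size, and the driver tracks BIT(log_size - log_block_size) fixed-size blocks in a bitmap, freed in reverse order on the error path. The block arithmetic, as a self-contained sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int log_region_size = 12;	/* region, in log2 units */
	unsigned int log_block_size = 3;	/* 8 units per block */
	uint64_t nblocks = 1ULL << (log_region_size - log_block_size);

	/* one bit per block, as bitmap_zalloc() does in the driver */
	unsigned char *map = calloc((nblocks + 7) / 8, 1);

	if (!map)
		return 1;
	printf("%llu blocks tracked\n", (unsigned long long)nblocks);
	free(map);
	return 0;
}
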
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a17152c1cbb2..c2593625c09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
- u32 warn_time_mili)
+ u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
- int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
- if (time_after(jiffies, end) ||
- test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
- err = -EBUSY;
- break;
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+ max_wait_mili, init_state);
+ return -ETIMEDOUT;
+ }
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+ init_state);
+ return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
- jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+ mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+ init_state, jiffies_to_msecs(end - warn) / 1000,
+ fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
- return err;
+ return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
@@ -219,7 +224,6 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
- int remaining_size = driver_ver_sz;
char *string;
if (!MLX5_CAP_GEN(dev, driver_version))
@@ -227,22 +231,9 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
- strncpy(string, "Linux", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, KBUILD_MODNAME, remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-
- snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL);
+ snprintf(string, driver_ver_sz, "Linux,%s,%u.%u.%u",
+ KBUILD_MODNAME, LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL);
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
@@ -1165,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
- mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- timeout);
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+ "pre-initializing");
+ if (err)
return err;
- }
err = mlx5_cmd_enable(dev);
if (err) {
@@ -1180,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_INIT));
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+ if (err)
goto err_cmd_cleanup;
- }
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
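
Besides giving wait_fw_init() distinct -ETIMEDOUT/-ENODEV returns with the message folded into the helper, the main.c hunk collapses the strncpy/strncat chain into one snprintf, which always NUL-terminates and truncates safely within the buffer. A runnable stand-in for the kernel macros:

#include <stdio.h>

#define MODNAME "mlx5_core"	/* stands in for KBUILD_MODNAME */

int main(void)
{
	char string[64];

	snprintf(string, sizeof(string), "Linux,%s,%u.%u.%u",
		 MODNAME, 6, 7, 0);	/* kernel version fields */
	puts(string);			/* Linux,mlx5_core,6.7.0 */
	return 0;
}
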
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6b14e347d914..58732f44940f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,6 +243,7 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
@@ -311,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
-enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_SW_RESET = 7
-};
-
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 653648216730..4dcf995cb1a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -28,7 +28,7 @@
struct mlx5_irq {
struct atomic_notifier_head nh;
cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
+ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
struct mlx5_irq_pool *pool;
int refcount;
struct msi_map map;
@@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
- "%s@pci:%s", name, pci_name(dev->pdev));
+ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
+ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index d3a77a0ab848..c4d377f8df30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -7,6 +7,9 @@
#include <linux/mlx5/driver.h>
#define MLX5_MAX_IRQ_NAME (32)
+#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
+#define MLX5_MAX_IRQ_FORMATTED_NAME \
+ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
/* max irq_index is 2047, so four chars */
#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_EQ_REFS_PER_IRQ (2)
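
The pci_irq change sizes the formatted-name buffer as the bare name length plus sizeof() of the format string, so the "@pci:" literal no longer eats into the name budget (the old code formatted "name@pci:bdf" into the same MLX5_MAX_IRQ_NAME-sized buffer). A sketch of the sizing, assuming typical lengths:

/* sizeof(FMT) counts the literal and its NUL, so base + sizeof(FMT)
 * gives the combined string realistic headroom; snprintf still
 * bounds the write either way.
 */
#include <stdio.h>

#define MAX_NAME 32
#define FMT "%s@pci:%s"
#define MAX_FORMATTED (MAX_NAME + sizeof(FMT))

int main(void)
{
	char out[MAX_FORMATTED];
	int n = snprintf(out, sizeof(out), FMT,
			 "mlx5_comp0", "0000:03:00.0");

	printf("%s (%d chars, buffer %zu)\n", out, n, sizeof(out));
	return 0;
}
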
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7d8c732818f2..7fba1c46e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1206,3 +1206,13 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
*speed = max_speed;
return 0;
}
+
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir)
+{
+ u32 in[MLX5_ST_SZ_DW(mpir_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(mpir_reg);
+
+ MLX5_SET(mpir_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sz, mpir, sz, MLX5_REG_MPIR, 0, 0);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index c93492b67788..99219ea52c4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+ struct mlx5_sf_dev *sf_dev)
{
int id;
@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(table->dev, sf_dev);
+ mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68ed5c..bc863e1f062e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -95,24 +95,29 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6ea88a581804..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -57,7 +57,8 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+ return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
}
@@ -787,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@@ -872,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
- dest_action = action;
- attr.final_icm_addr = rx_rule ?
- action->vport->caps->icm_address_rx :
- action->vport->caps->icm_address_tx;
+ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+ /* can't go to uplink on RX rule - dropping instead */
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ } else {
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+ dest_action = action;
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
+ }
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -1169,7 +1177,6 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
- struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
@@ -1248,11 +1255,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
* the one done in the TX.
* So, if one of the ft targets is wire, put it at the end of the dest list.
*/
- if (is_ft_wire && num_dst_ft > 1) {
- tmp_hw_dest = hw_dests[last_dest];
- hw_dests[last_dest] = hw_dests[num_of_dests - 1];
- hw_dests[num_of_dests - 1] = tmp_hw_dest;
- }
+ if (is_ft_wire && num_dst_ft > 1)
+ swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]);
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
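
The last dr_action.c hunk replaces a hand-rolled three-assignment exchange with the kernel's swap() macro, dropping the temporary from the stack frame. The macro's shape, as a GNU C userspace equivalent:

#include <stdio.h>

/* like include/linux/minmax.h swap(): exchange two lvalues */
#define swap(a, b) \
	do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

int main(void)
{
	int last = 1, tail = 9;

	swap(last, tail);	/* replaces the open-coded tmp variable */
	printf("%d %d\n", last, tail);
	return 0;
}
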
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7e36e1062139..64f4cc284aea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *new_buff;
+
+ new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+ if (!new_buff)
+ return NULL;
+
+ new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+ if (!new_buff->buff) {
+ kfree(new_buff);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&new_buff->node);
+ list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+ return new_buff;
+}
+
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+ if (!dump_data)
+ return NULL;
+
+ INIT_LIST_HEAD(&dump_data->buff_list);
+
+ if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+ kfree(dump_data);
+ return NULL;
+ }
+
+ return dump_data;
+}
+
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+ if (!dump_data)
+ return;
+
+ list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+ kvfree(dump_buff->buff);
+ list_del(&dump_buff->node);
+ kfree(dump_buff);
+ }
+
+ kfree(dump_data);
+}
+
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ struct mlx5dr_dbg_dump_buff *buff;
+ u32 buff_capacity, write_size;
+ int remain_size, ret;
+
+ if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+ return -EINVAL;
+
+ dump_data = dmn->dump_info.dump_data;
+ buff = list_last_entry(&dump_data->buff_list,
+ struct mlx5dr_dbg_dump_buff, node);
+
+ buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+ remain_size = buff_capacity - size;
+ write_size = (remain_size > 0) ? size : buff_capacity;
+
+ if (likely(write_size)) {
+ ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+ if (ret < 0)
+ return ret;
+
+ buff->index += write_size;
+ }
+
+ if (remain_size < 0) {
+ remain_size *= -1;
+ buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+ if (!buff)
+ return -ENOMEM;
+
+ ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+ if (ret < 0)
+ return ret;
+
+ buff->index += remain_size;
+ }
+
+ return 0;
+}
+
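+/* The helpers above convert the dump path from seq_printf() to a list of
+ * kvzalloc'd chunks: each record is formatted into a scratch string, the
+ * part that fits is appended to the tail chunk, and any spill goes into a
+ * freshly allocated chunk. A simplified, illustrative userspace sketch of
+ * that scheme (tiny chunk size, recursion instead of the driver's split):
+ *
+ *	#include <stdio.h>
+ *	#include <stdlib.h>
+ *	#include <string.h>
+ *
+ *	#define CHUNK 16
+ *
+ *	struct buff { char data[CHUNK]; int index; struct buff *next; };
+ *
+ *	static struct buff *new_buff(struct buff **tail)
+ *	{
+ *		struct buff *b = calloc(1, sizeof(*b));
+ *
+ *		if (b) {
+ *			(*tail)->next = b;
+ *			*tail = b;
+ *		}
+ *		return b;
+ *	}
+ *
+ *	static int dump_print(struct buff **tail, const char *str)
+ *	{
+ *		int size = (int)strlen(str);
+ *		int cap = (CHUNK - 1) - (*tail)->index;	// room for NUL
+ *		int write = size < cap ? size : cap;
+ *
+ *		memcpy((*tail)->data + (*tail)->index, str, write);
+ *		(*tail)->index += write;
+ *		(*tail)->data[(*tail)->index] = '\0';
+ *		if (size > write) {			// spill over
+ *			if (!new_buff(tail))
+ *				return -1;
+ *			return dump_print(tail, str + write);
+ *		}
+ *		return 0;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct buff head = { 0 }, *tail = &head;
+ *
+ *		dump_print(&tail, "0x1234,rule,action\n");
+ *		for (struct buff *b = &head; b; b = b->next)
+ *			printf("[%s]", b->data);
+ *		putchar('\n');
+ *		return 0;
+ *	}
+ */
+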
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
+ int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id,
- -1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id,
- DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_CTR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
- action->ctr->ctr_id + action->ctr->offset);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TAG:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
- action->flow_tag->flow_tag);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
- DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index,
- action->rewrite->single_action_opt,
- ptrn_arg ? action->rewrite->num_of_actions : 0,
- ptrn_arg ? ptrn->index : 0,
- ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ ptrn_arg ? action->rewrite->num_of_actions : 0,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
- seq_printf(file, ",0x%016llx",
- be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
}
- seq_puts(file, "\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
}
case DR_ACTION_TYP_VPORT:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
- action->vport->caps->num);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id,
- (action->rewrite->ptrn && action->rewrite->arg) ?
- mlx5dr_arg_get_obj_id(action->rewrite->arg) :
- action->rewrite->index);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
- rule_id, action->push_vlan->vlan_hdr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_SAMPLER:
- seq_printf(file,
- "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
- 0, 0, action->sampler->sampler_id,
- action->sampler->rx_icm_addr,
- action->sampler->tx_icm_addr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+ rule_id, 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
- hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
- action->range->definer_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+ rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+ miss_tbl_ptr, action->range->definer_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
default:
return 0;
@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
+ int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
- seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
- dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
- hw_ste_dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+ rule_id, hw_ste_dump);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
- seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
- DR_DBG_PTR_TO_ID(rule->matcher));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+ rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
@@ -344,46 +589,94 @@ static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
+ int ret;
- seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
- matcher_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+ DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
- seq_printf(file, "%s\n", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s\n", dump);
} else {
- seq_puts(file, ",\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -391,9 +684,19 @@ static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
- DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
- builder->lu_type);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+ is_rx, builder->lu_type);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
- rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
- matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(s_icm_addr),
- dr_dump_icm_to_idx(e_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
- seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
- matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+ matcher->prio);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
+ int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(s_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(s_icm_addr));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
- seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
- DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
- table->table_type, table->level);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
@@ -546,46 +884,86 @@ static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
- domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+ DR_DBG_PTR_TO_ID(ring), domain_id,
+ ring->cq->mcq.cqn, ring->qp->qpn);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,%s,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
- flex_parser_name, flex_parser_value);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
+ int ret;
xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
; /* count the number of vports in xarray */
- seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
- caps->nic_rx_drop_address, caps->nic_tx_drop_address,
- caps->flex_protocols, vports_num, caps->eswitch_manager);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
- seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
- vport_caps->vport_gvmi, vport_caps->icm_address_rx,
- vport_caps->icm_address_tx);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+ domain_id, i, vport_caps->vport_gvmi,
+ vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
return 0;
}
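Each converted helper now keeps a 512-byte buff in its stack frame, which is why the larger ones are annotated noinline_for_stack: if the compiler inlined them all into dr_dump_domain(), those buffers would pile up in a single frame. The annotation is a documented alias for noinline, from include/linux/compiler_types.h:

/* The name records the reason for suppressing inlining. */
#define noinline_for_stack noinline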
@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
- seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
- DR_DUMP_REC_TYPE_DOMAIN,
- domain_id, dmn->type, dmn->info.caps.gvmi,
- dmn->info.supp_sw_steering,
- /* package version */
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL,
- pci_name(dmn->mdev->pdev),
- 0, /* domain flags */
- dmn->num_buddies[DR_ICM_TYPE_STE],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+ DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering,
+ /* package version */
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL,
+ pci_name(dmn->mdev->pdev),
+ 0, /* domain flags */
+ dmn->num_buddies[DR_ICM_TYPE_STE],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
@@ -683,11 +1069,91 @@ unlock_mutex:
return ret;
}
-static int dr_dump_show(struct seq_file *file, void *priv)
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
{
- return dr_dump_domain_all(file, file->private);
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+ mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+ dump_data = dmn->dump_info.dump_data;
+
+ if (dump_data) {
+ return seq_list_start(&dump_data->buff_list, *pos);
+ } else if (*pos == 0) {
+ dump_data = mlx5dr_dbg_create_dump_data();
+ if (!dump_data)
+ goto exit;
+
+ dmn->dump_info.dump_data = dump_data;
+ if (dr_dump_domain_all(file, dmn)) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ goto exit;
+ }
+
+ return seq_list_start(&dump_data->buff_list, *pos);
+ }
+
+exit:
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+ return NULL;
}
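dr_dump_start() serves two purposes: the atomic state turns concurrent readers away with -EBUSY (seq_read() converts the ERR_PTR into an error return), and the dump is built lazily, only on the first iteration at *pos == 0; later calls resume walking the already-built buff_list. The iteration uses the stock seq_file list helpers, declared in include/linux/seq_file.h:

/* seq_list_start() returns the pos'th node of the list, or NULL past
 * the end; seq_list_next() advances to the following node and bumps
 * *ppos.
 */
struct list_head *seq_list_start(struct list_head *head, loff_t pos);
struct list_head *seq_list_next(void *v, struct list_head *head,
				loff_t *ppos);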
-DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = dmn->dump_info.dump_data;
+
+ return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (v && IS_ERR(v))
+ return;
+
+ if (!v) {
+ dump_data = dmn->dump_info.dump_data;
+ if (dump_data) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ }
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+ struct mlx5dr_dbg_dump_buff *entry;
+
+ entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+ seq_printf(file, "%s", entry->buff);
+
+ return 0;
+}
+
+static const struct seq_operations dr_dump_sops = {
+ .start = dr_dump_start,
+ .next = dr_dump_next,
+ .stop = dr_dump_stop,
+ .show = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
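DEFINE_SEQ_ATTRIBUTE(dr_dump) supplies the file_operations for the debugfs node, replacing the single-shot DEFINE_SHOW_ATTRIBUTE(): reads are now driven through dr_dump_sops, one buffer-list node per show() call. The macro (include/linux/seq_file.h) expands roughly to the following, which is also where file->private picks up the mlx5dr_domain stashed in the debugfs inode:

static int dr_dump_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dr_dump_sops);

	if (!ret && inode->i_private) {
		struct seq_file *seq_file = file->private_data;

		seq_file->private = inode->i_private; /* the domain */
	}
	return ret;
}

static const struct file_operations dr_dump_fops = {
	.owner   = THIS_MODULE,
	.open    = dr_dump_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};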
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
index def6cf853eea..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+ MLX5DR_DEBUG_DUMP_STATE_FREE,
+ MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+ char *buff;
+ u32 index;
+ struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+ struct list_head buff_list;
+};
+
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ atomic_t state;
};
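buff_list chains the dump output as mlx5dr_dbg_dump_buff nodes whose backing buffers are MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 MB) each; the create/destroy helpers live in an earlier hunk of this patch. An illustrative sketch of the teardown side only, assuming kvzalloc'ed chunk buffers:

/* Illustrative only -- the real helper is defined earlier in the
 * patch and may differ in detail.
 */
static void mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
{
	struct mlx5dr_dbg_dump_buff *buff, *tmp;

	list_for_each_entry_safe(buff, tmp, &dump_data->buff_list, node) {
		list_del(&buff->node);
		kvfree(buff->buff);
		kfree(buff);
	}
	kfree(dump_data);
}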
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 4e8527a724f5..6fa06ba2d346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
u32 cqn;
u32 pdn;
u32 max_send_wr;
- u32 max_send_sge;
struct mlx5_uars_page *uar;
u8 isolate_vl_tc:1;
};
@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
-{
- return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
- sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
-}
-
-/* We calculate for specific RC QP with the required functionality */
-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
-{
- int update_arg_size;
- int inl_size = 0;
- int tot_size;
- int size;
-
- update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
-
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
- inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
- DR_STE_SIZE, 16);
-
- size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
-
- size = max(size, update_arg_size);
-
- tot_size = max(size, inl_size);
-
- return ALIGN(tot_size, MLX5_SEND_WQE_BB);
-}
-
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
- int wqe_size;
int inlen;
void *qpc;
void *in;
@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
if (err)
goto err_in;
dr_qp->uar = attr->uar;
- wqe_size = dr_qp_calc_rc_send_wqe(attr);
- dr_qp->max_inline_data = min(wqe_size -
- (sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg)),
- (2 * MLX5_SEND_WQE_BB -
- (sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg))));
return dr_qp;
@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
MLX5_SEND_WQE_DS;
}
-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
- struct dr_data_seg *data_seg, void *wqe)
-{
- int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_inline_seg);
- struct mlx5_wqe_inline_seg *seg;
- int left_space;
- int inl = 0;
- void *addr;
- int len;
- int idx;
-
- seg = wqe;
- wqe += sizeof(*seg);
- addr = (void *)(unsigned long)(data_seg->addr);
- len = data_seg->length;
- inl += len;
- left_space = MLX5_SEND_WQE_BB - inline_header_size;
-
- if (likely(len > left_space)) {
- memcpy(wqe, addr, left_space);
- len -= left_space;
- addr += left_space;
- idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
- wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
- }
-
- memcpy(wqe, addr, len);
-
- if (likely(inl)) {
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
- return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
- MLX5_SEND_WQE_DS);
- } else {
- return 0;
- }
-}
-
static void
-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
- struct mlx5_wqe_ctrl_seg *wq_ctrl,
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
u64 remote_addr,
u32 rkey,
struct dr_data_seg *data_seg,
@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
wq_raddr->reserved = 0;
wq_dseg = (void *)(wq_raddr + 1);
- /* WQE ctrl segment + WQE remote addr segment */
- *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
- if (data_seg->send_flags & IB_SEND_INLINE) {
- *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
- } else {
- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
- wq_dseg->addr = cpu_to_be64(data_seg->addr);
- *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
- }
+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
+ sizeof(*wq_dseg) + /* WQE data segment */
+ sizeof(*wq_raddr)) / /* WQE remote addr segment */
+ MLX5_SEND_WQE_DS;
}
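With the inline branch gone, the WQE size for an ICM RDMA read/write is constant again. *size is expressed in MLX5_SEND_WQE_DS units of 16 bytes, and each of the three segments is itself 16 bytes, so the computation always yields 3:

/* sizeof(struct mlx5_wqe_ctrl_seg)  == 16
 * sizeof(struct mlx5_wqe_data_seg)  == 16
 * sizeof(struct mlx5_wqe_raddr_seg) == 16
 * MLX5_SEND_WQE_DS                  == 16
 *
 * *size = (16 + 16 + 16) / 16 = 3
 */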
static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
switch (opcode) {
case MLX5_OPCODE_RDMA_READ:
case MLX5_OPCODE_RDMA_WRITE:
- dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
+ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
rkey, data_seg, &size);
break;
case MLX5_OPCODE_FLOW_TBL_ACCESS:
@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
else
- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+ send_info->write.send_flags = 0;
}
static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
}
send_ring->pending_wqe++;
- if (!send_info->write.lkey)
- send_info->write.send_flags |= IB_SEND_INLINE;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
- else
- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
send_ring->pending_wqe++;
send_info->read.length = send_info->write.length;
@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
send_info->read.lkey = send_ring->sync_mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
- send_info->read.send_flags |= IB_SEND_SIGNALED;
+ send_info->read.send_flags = IB_SEND_SIGNALED;
else
- send_info->read.send_flags &= ~IB_SEND_SIGNALED;
+ send_info->read.send_flags = 0;
}
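The signaling logic is selective completion generation: only every signal_th-th posted WQE asks for a CQE, presumably to keep completion processing cheap while still bounding the number of unsignaled WQEs in flight. Plain assignment replaces the |= / &= ~ read-modify-write so that no stale flag (such as the now-removed IB_SEND_INLINE) can survive in the reused send_info. The cadence, illustrated for signal_th == 4:

/* pending_wqe (after ++):  1  2  3  4  5  6  7  8 ...
 * pending_wqe % 4:         1  2  3  0  1  2  3  0 ...
 * IB_SEND_SIGNALED set:    -  -  -  *  -  -  -  * ...
 */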
static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->cq->qp = dmn->send_ring->qp;
dmn->info.max_send_wr = QUEUE_SIZE;
- init_attr.max_send_sge = 1;
dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
DR_STE_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 5a31fb47ffa5..1005bb6935b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
- out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kvzalloc(out_sz, GFP_KERNEL);
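The one-line fix above corrects the reply-buffer sizing: the MAC list comes back in the query_nic_vport_context_out mailbox (the nic_vport_context plus req_list_size trailing mac_address_layout entries), so the allocation has to start from the output struct's size; basing it on the input struct under-allocates the reply. MLX5_ST_SZ_BYTES() is the usual mlx5 firmware-interface size macro from include/linux/mlx5/device.h:

#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)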
@@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
+
+ *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.sd_group);
+out:
+ kvfree(out);
+ return err;
+}
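mlx5_query_nic_vport_sd_group() reads the new sd_group field out of vport 0's nic_vport_context, returning 0 on success with *sd_group filled in. A usage sketch; the caller below is hypothetical:

/* Hypothetical caller -- shows the calling convention only. */
static int example_read_sd_group(struct mlx5_core_dev *mdev)
{
	u8 sd_group;
	int err;

	err = mlx5_query_nic_vport_sd_group(mdev, &sd_group);
	if (err)
		return err;

	mlx5_core_dbg(mdev, "sd_group %u\n", sd_group);
	return 0;
}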
+
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;