Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-01-23

This series contains updates to i40e and i40evf only.

Pawel enables FlatNVM support on X722 devices by allowing the nvmupdate
tool to configure the preservation flags in the AdminQ command.
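
The mechanism, condensed from the i40e_aq_update_nvm() hunk further down
(the driver only sets these bits for X722 MACs):

    if (hw->mac.type == I40E_MAC_X722) {
        if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
            cmd->command_flags |=
                (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
                 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
        else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
            cmd->command_flags |=
                (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
                 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
    }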

Mitch fixes a potential divide-by-zero error when DCB is enabled but the
firmware fails to configure the VSI, by checking for that state before
dividing.  He also fixes a bug where the driver could fail to adhere to ETS
bandwidth allocations if 8 traffic classes were configured on the switch.
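
The guard, condensed from the i40e_pf_config_rss() hunk below; falling back
to a single traffic class keeps the division safe even if the firmware left
numtc at zero:

    qcount = vsi->num_queue_pairs /
             (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
    vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);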

Sudheer fixes a potential deadlock by not calling flush_scheduled_work() in
i40evf_remove(), since cancel_work_sync() and cancel_delayed_work_sync()
already clean up the necessary work items.  He also fixes the detection of
and recovery from hung queues in the PF, which was losing interrupts: if we
are already in napi_poll when an interrupt fires, napi_poll is not
rescheduled and the interrupt is effectively lost, so the driver now detects
the stalled queue and triggers a software interrupt to force interrupts back
on.
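
The per-ring check, condensed from the new i40e_detect_recover_hung() in the
diff below (prev_pkt_ctr is negative when no work was pending on the
previous service-task pass):

    packets = tx_ring->stats.packets & INT_MAX;
    if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
        /* counter stalled with work outstanding: force a SW interrupt */
        i40e_force_wb(vsi, tx_ring->q_vector);
        continue;
    }
    /* memory barrier between the packet-count read and the pending check */
    smp_rmb();
    tx_ring->tx_stats.prev_pkt_ctr =
        i40e_get_tx_pending(tx_ring) ? packets : -1;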

Avinash fixes an issue in the VF where it was possible to schedule a
reset_task while the device was being removed.
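
Condensed from the i40evf hunks below: i40evf_remove() sets a new critical
section bit that i40evf_reset_task() tests before doing any work:

    /* in i40evf_remove() */
    set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);

    /* at the top of i40evf_reset_task() */
    if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
        return;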

Michal fixes an issue that occurred when calling i40e_led_set() with the
blink parameter set to true, which caused the activity LED, rather than the
link LED, to blink for port identification.
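
The fix, condensed from the i40e_led_set() hunk below: instead of forcing
blink off for I40E_LINK_ACTIVITY, that mode now joins the list of activity
modes whose GPIOs are skipped during the scan:

    case I40E_COMBINED_ACTIVITY:
    case I40E_FILTER_ACTIVITY:
    case I40E_MAC_ACTIVITY:
    case I40E_LINK_ACTIVITY:
        continue;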

Shiraz changes the client interface to not call client close/open on netdev
down/up events, since this causes a lot of unnecessary thrash.  Instead, the
PE TCP-ENA flag is disabled on a netdev down event, which blocks all TCP
traffic to the RDMA protocol engine, and re-enabled on a netdev up event.
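
Condensed from the i40e_client_subtask() hunk below; per the
i40e_client_update_vsi_ctxt() hunk, the last two arguments carry the flag
value and the valid-flag mask, so a zero value with the TCP_ENABLE mask set
clears the flag:

    if (test_bit(__I40E_VSI_DOWN, vsi->state))
        i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0,
                                    0, I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
    else
        i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0,
                                    I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
                                    I40E_CLIENT_VSI_FLAG_TCP_ENABLE);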

Alan fixes a potential transmit hang by ignoring the PF's link up message
when the VF state is not yet in the RUNNING state.
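
Condensed from the i40evf_virtchnl_completion() hunk below: a link-up event
is now ignored until the VF reaches __I40EVF_RUNNING, since starting the
queues before they are configured is what triggered the hang; another link
event arrives once the queues are enabled:

    if (link_up && adapter->state != __I40EVF_RUNNING)
        break;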

Amritha fixes the channel VSI recreation during the reset flow to
reconfigure the transmit rings and the queue context associated with
the channel VSI.
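
Condensed from the i40e_rebuild_channels() hunk below: each channel VSI's TX
rings are re-programmed via the QTX_CTL register and the base-queue
bookkeeping is advanced past the channel's queue pairs:

    ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
    if (ret)
        return ret;
    vsi->next_base_queue += ch->num_queue_pairs;
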
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 521504640f
David S. Miller <davem@davemloft.net>, 2018-01-23 20:22:57 -05:00
18 changed files with 404 additions and 211 deletions

drivers/net/ethernet/intel/i40e/i40e_adminq.c

@ -1027,7 +1027,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
-i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)

drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h

@ -2231,8 +2231,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;

drivers/net/ethernet/intel/i40e/i40e_client.c

@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (!client || !cdev)
return;
-/* Here we handle client opens. If the client is down, but
-* the netdev is up, then open the client.
+/* Here we handle client opens. If the client is down, and
+* the netdev is registered, then open the client.
*/
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
-if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+if (vsi->netdev_registered &&
client->ops && client->ops->open) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
ret = client->ops->open(&cdev->lan_info, client);
@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
i40e_client_del_instance(pf);
}
}
-} else {
-/* Likewise for client close. If the client is up, but the netdev
-* is down, then close the client.
-*/
-if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
-client->ops && client->ops->close) {
-clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
-client->ops->close(&cdev->lan_info, client, false);
-i40e_client_release_qvlist(&cdev->lan_info);
-}
-}
+/* enable/disable PE TCP_ENA flag based on netdev down/up
+*/
+if (test_bit(__I40E_VSI_DOWN, vsi->state))
+i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+0, 0, 0,
+I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+else
+i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+0, 0,
+I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
}
/**
@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return -ENOENT;
}
-if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
-} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
-!(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+} else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+!(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;

drivers/net/ethernet/intel/i40e/i40e_client.h

@ -132,7 +132,7 @@ struct i40e_info {
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
struct i40e_ops {
/* setup_q_vector_list enables queues with a particular vector */

drivers/net/ethernet/intel/i40e/i40e_common.c

@ -1486,6 +1486,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+case I40E_LINK_ACTIVITY:
continue;
default:
break;
@ -1534,6 +1535,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+case I40E_LINK_ACTIVITY:
continue;
default:
break;
@ -1544,9 +1546,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
-if (mode == I40E_LINK_ACTIVITY)
-blink = false;
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@ -3465,13 +3464,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+* @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
-bool last_command,
+bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@ -3490,6 +3490,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+if (hw->mac.type == I40E_MAC_X722) {
+if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+cmd->command_flags |=
+(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+cmd->command_flags |=
+(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+}
cmd->module_pointer = module_pointer;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);

drivers/net/ethernet/intel/i40e/i40e_main.c

@ -4876,104 +4876,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
#endif
-/**
-* i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
-* @q_idx: TX queue number
-* @vsi: Pointer to VSI struct
-*
-* This function checks specified queue for given VSI. Detects hung condition.
-* We proactively detect hung TX queues by checking if interrupts are disabled
-* but there are pending descriptors. If it appears hung, attempt to recover
-* by triggering a SW interrupt.
-**/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
-struct i40e_ring *tx_ring = NULL;
-struct i40e_pf *pf;
-u32 val, tx_pending;
-int i;
-pf = vsi->back;
-/* now that we have an index, find the tx_ring struct */
-for (i = 0; i < vsi->num_queue_pairs; i++) {
-if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-if (q_idx == vsi->tx_rings[i]->queue_index) {
-tx_ring = vsi->tx_rings[i];
-break;
-}
-}
-}
-if (!tx_ring)
-return;
-/* Read interrupt register */
-if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-val = rd32(&pf->hw,
-I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
-tx_ring->vsi->base_vector - 1));
-else
-val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-tx_pending = i40e_get_tx_pending(tx_ring);
-/* Interrupts are disabled and TX pending is non-zero,
-* trigger the SW interrupt (don't wait). Worst case
-* there will be one extra interrupt which may result
-* into not cleaning any queues because queues are cleaned.
-*/
-if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
-i40e_force_wb(vsi, tx_ring->q_vector);
-}
-/**
-* i40e_detect_recover_hung - Function to detect and recover hung_queues
-* @pf: pointer to PF struct
-*
-* LAN VSI has netdev and netdev has TX queues. This function is to check
-* each of those TX queues if they are hung, trigger recovery by issuing
-* SW interrupt.
-**/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
-struct net_device *netdev;
-struct i40e_vsi *vsi;
-unsigned int i;
-/* Only for LAN VSI */
-vsi = pf->vsi[pf->lan_vsi];
-if (!vsi)
-return;
-/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
-if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
-test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
-return;
-/* Make sure type is MAIN VSI */
-if (vsi->type != I40E_VSI_MAIN)
-return;
-netdev = vsi->netdev;
-if (!netdev)
-return;
-/* Bail out if netif_carrier is not OK */
-if (!netif_carrier_ok(netdev))
-return;
-/* Go thru' TX queues for netdev */
-for (i = 0; i < netdev->num_tx_queues; i++) {
-struct netdev_queue *q;
-q = netdev_get_tx_queue(netdev, i);
-if (q)
-i40e_detect_recover_hung_queue(i, vsi);
-}
-}
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
@ -5342,6 +5244,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+struct i40e_pf *pf = vsi->back;
+struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
int ret = 0;
int i;
@ -5359,10 +5263,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
if (ret) {
-dev_info(&vsi->back->pdev->dev,
+struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+dev_info(&pf->pdev->dev,
"Failed configuring TC map %d for VSI %d\n",
enabled_tc, vsi->seid);
-goto out;
+ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+&bw_config, NULL);
+if (ret) {
+dev_info(&pf->pdev->dev,
+"Failed querying vsi bw info, err %s aq_err %s\n",
+i40e_stat_str(hw, ret),
+i40e_aq_str(hw, hw->aq.asq_last_status));
+goto out;
+}
+if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+if (!valid_tc)
+valid_tc = bw_config.tc_valid_bits;
+/* Always enable TC0, no matter what */
+valid_tc |= 1;
+dev_info(&pf->pdev->dev,
+"Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+enabled_tc, bw_config.tc_valid_bits, valid_tc);
+enabled_tc = valid_tc;
+}
+ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+if (ret) {
+dev_err(&pf->pdev->dev,
+"Unable to configure TC map %d for VSI %d\n",
+enabled_tc, vsi->seid);
+goto out;
+}
}
/* Update Queue Pairs Mapping for currently enabled UPs */
@ -5402,13 +5336,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update the VSI after updating the VSI queue-mapping
* information
*/
-ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
-dev_info(&vsi->back->pdev->dev,
+dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %s aq_err %s\n",
-i40e_stat_str(&vsi->back->hw, ret),
-i40e_aq_str(&vsi->back->hw,
-vsi->back->hw.aq.asq_last_status));
+i40e_stat_str(hw, ret),
+i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@ -5418,11 +5351,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update current VSI BW information */
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
-dev_info(&vsi->back->pdev->dev,
+dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %s aq_err %s\n",
-i40e_stat_str(&vsi->back->hw, ret),
-i40e_aq_str(&vsi->back->hw,
-vsi->back->hw.aq.asq_last_status));
+i40e_stat_str(hw, ret),
+i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@ -9075,6 +9007,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
vsi->uplink_seid);
return ret;
}
+/* Reconfigure TX queues using QTX_CTL register */
+ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+if (ret) {
+dev_info(&vsi->back->pdev->dev,
+"failed to configure TX rings for channel %u\n",
+ch->seid);
+return ret;
+}
+/* update 'next_base_queue' */
+vsi->next_base_queue = vsi->next_base_queue +
+ch->num_queue_pairs;
if (ch->max_tx_rate) {
u64 credits = ch->max_tx_rate;
@ -9695,7 +9638,7 @@ static void i40e_service_task(struct work_struct *work)
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
-i40e_detect_recover_hung(pf);
+i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@ -10462,10 +10405,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
pf->irq_pile = kzalloc(size, GFP_KERNEL);
-if (!pf->irq_pile) {
-dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+if (!pf->irq_pile)
return -ENOMEM;
-}
pf->irq_pile->num_entries = vectors;
pf->irq_pile->search_hint = 0;
@ -10783,8 +10725,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* Determine the RSS size of the VSI */
if (!vsi->rss_size) {
u16 qcount;
-qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+/* If the firmware does something weird during VSI init, we
+* could end up with zero TCs. Check for that to avoid
+* divide-by-zero. It probably won't pass traffic, but it also
+* won't panic.
+*/
+qcount = vsi->num_queue_pairs /
+(vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
}
if (!vsi->rss_size)
@ -10972,7 +10919,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
ret = i40e_aq_update_nvm(&pf->hw,
I40E_SR_NVM_CONTROL_WORD,
0x10, sizeof(nvm_word),
-&nvm_word, true, NULL);
+&nvm_word, true, 0, NULL);
/* Save off last admin queue command status before releasing
* the NVM
*/

drivers/net/ethernet/intel/i40e/i40e_nvm.c

@ -239,8 +239,9 @@ read_nvm_exit:
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
-u32 offset, u16 words, void *data,
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+u8 module_pointer, u32 offset,
+u16 words, void *data,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
-data, last_command, &cmd_details);
+data, last_command, 0,
+&cmd_details);
return ret_code;
}
@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+struct i40e_nvm_access *cmd,
+u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+"I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
-i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+i40e_nvmupd_clear_wait_state(hw);
status = 0;
-goto exit;
+break;
}
status = I40E_ERR_NOT_READY;
@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
-exit:
mutex_unlock(&hw->aq.arq_mutex);
return status;
}
@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+case I40E_NVMUPD_GET_AQ_EVENT:
+status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+break;
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@ -1117,39 +1133,54 @@ retry:
return status;
}
+/**
+* i40e_nvmupd_clear_wait_state - clear wait state on hw
+* @hw: pointer to the hardware structure
+**/
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
+{
+i40e_debug(hw, I40E_DEBUG_NVM,
+"NVMUPD: clearing wait on opcode 0x%04x\n",
+hw->nvm_wait_opcode);
+if (hw->nvm_release_on_done) {
+i40e_release_nvm(hw);
+hw->nvm_release_on_done = false;
+}
+hw->nvm_wait_opcode = 0;
+if (hw->aq.arq_last_status) {
+hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+return;
+}
+switch (hw->nvmupd_state) {
+case I40E_NVMUPD_STATE_INIT_WAIT:
+hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+break;
+case I40E_NVMUPD_STATE_WRITE_WAIT:
+hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+break;
+default:
+break;
+}
+}
/**
* i40e_nvmupd_check_wait_event - handle NVM update operation events
* @hw: pointer to the hardware structure
* @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+struct i40e_aq_desc *desc)
{
+u32 aq_desc_len = sizeof(struct i40e_aq_desc);
if (opcode == hw->nvm_wait_opcode) {
-i40e_debug(hw, I40E_DEBUG_NVM,
-"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
-if (hw->nvm_release_on_done) {
-i40e_release_nvm(hw);
-hw->nvm_release_on_done = false;
-}
-hw->nvm_wait_opcode = 0;
-if (hw->aq.arq_last_status) {
-hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
-return;
-}
-switch (hw->nvmupd_state) {
-case I40E_NVMUPD_STATE_INIT_WAIT:
-hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-break;
-case I40E_NVMUPD_STATE_WRITE_WAIT:
-hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-break;
-default:
-break;
-}
+memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+i40e_nvmupd_clear_wait_state(hw);
}
}
@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+case I40E_NVM_AQE:
+upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+break;
}
break;
@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+if (cmd->offset == 0xffff)
+return 0;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+if (cmd->offset)
+memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+return status;
}
/* should we wait for a followup event? */
@ -1391,6 +1432,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
return 0;
}
+/**
+* i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+* @hw: pointer to hardware structure
+* @cmd: pointer to nvm update command buffer
+* @bytes: pointer to the data buffer
+* @perrno: pointer to return error code
+*
+* cmd structure contains identifiers and data buffer
+**/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+struct i40e_nvm_access *cmd,
+u8 *bytes, int *perrno)
+{
+u32 aq_total_len;
+u32 aq_desc_len;
+i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+aq_desc_len = sizeof(struct i40e_aq_desc);
+aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+/* check copylength range */
+if (cmd->data_size > aq_total_len) {
+i40e_debug(hw, I40E_DEBUG_NVM,
+"%s: copy length %d too big, trimming to %d\n",
+__func__, cmd->data_size, aq_total_len);
+cmd->data_size = aq_total_len;
+}
+memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+return 0;
+}
/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
-&cmd_details);
+preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",

drivers/net/ethernet/intel/i40e/i40e_prototype.h

@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
-bool last_command,
+bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@ -333,7 +333,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];

drivers/net/ethernet/intel/i40e/i40e_txrx.c

@ -726,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
+/**
+* i40e_detect_recover_hung - Function to detect and recover hung_queues
+* @vsi: pointer to vsi struct with tx queues
+*
+* VSI has netdev and netdev has TX queues. This function is to check each of
+* those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+**/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+struct i40e_ring *tx_ring = NULL;
+struct net_device *netdev;
+unsigned int i;
+int packets;
+if (!vsi)
+return;
+if (test_bit(__I40E_VSI_DOWN, vsi->state))
+return;
+netdev = vsi->netdev;
+if (!netdev)
+return;
+if (!netif_carrier_ok(netdev))
+return;
+for (i = 0; i < vsi->num_queue_pairs; i++) {
+tx_ring = vsi->tx_rings[i];
+if (tx_ring && tx_ring->desc) {
+/* If packet counter has not changed the queue is
+* likely stalled, so force an interrupt for this
+* queue.
+*
+* prev_pkt_ctr would be negative if there was no
+* pending work.
+*/
+packets = tx_ring->stats.packets & INT_MAX;
+if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+i40e_force_wb(vsi, tx_ring->q_vector);
+continue;
+}
+/* Memory barrier between read of packet count and call
+* to i40e_get_tx_pending()
+*/
+smp_rmb();
+tx_ring->tx_stats.prev_pkt_ctr =
+i40e_get_tx_pending(tx_ring) ? packets : -1;
+}
+}
+}
#define WB_STRIDE 4
/**
@ -1163,6 +1216,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:

drivers/net/ethernet/intel/i40e/i40e_txrx.h

@ -333,6 +333,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+int prev_pkt_ctr;
};
struct i40e_rx_queue_stats {
@ -501,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

drivers/net/ethernet/intel/i40e/i40e_type.h

@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@ -611,6 +618,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;

drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h

@ -2196,8 +2196,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;

drivers/net/ethernet/intel/i40evf/i40e_txrx.c

@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
+/**
+* i40evf_detect_recover_hung - Function to detect and recover hung_queues
+* @vsi: pointer to vsi struct with tx queues
+*
+* VSI has netdev and netdev has TX queues. This function is to check each of
+* those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+**/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+struct i40e_ring *tx_ring = NULL;
+struct net_device *netdev;
+unsigned int i;
+int packets;
+if (!vsi)
+return;
+if (test_bit(__I40E_VSI_DOWN, vsi->state))
+return;
+netdev = vsi->netdev;
+if (!netdev)
+return;
+if (!netif_carrier_ok(netdev))
+return;
+for (i = 0; i < vsi->back->num_active_queues; i++) {
+tx_ring = &vsi->back->tx_rings[i];
+if (tx_ring && tx_ring->desc) {
+/* If packet counter has not changed the queue is
+* likely stalled, so force an interrupt for this
+* queue.
+*
+* prev_pkt_ctr would be negative if there was no
+* pending work.
+*/
+packets = tx_ring->stats.packets & INT_MAX;
+if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+i40evf_force_wb(vsi, tx_ring->q_vector);
+continue;
+}
+/* Memory barrier between read of packet count and call
+* to i40evf_get_tx_pending()
+*/
+smp_rmb();
+tx_ring->tx_stats.prev_pkt_ctr =
+i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+}
+}
+}
#define WB_STRIDE 4
/**
@ -469,6 +522,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:

drivers/net/ethernet/intel/i40evf/i40e_txrx.h

@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);

drivers/net/ethernet/intel/i40evf/i40e_type.h

@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@ -561,6 +568,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;

drivers/net/ethernet/intel/i40evf/i40evf.h

@ -187,6 +187,7 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK,
+__I40EVF_IN_REMOVE_TASK, /* device being removed */
};
/* board specific private data structure */

drivers/net/ethernet/intel/i40evf/i40evf_main.c

@ -1716,6 +1716,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
+if (adapter->state == __I40EVF_RUNNING)
+i40evf_detect_recover_hung(&adapter->vsi);
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@ -1803,6 +1805,12 @@ static void i40evf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
+/* When device is being removed it doesn't make sense to run the reset
+* task, just return in such a case.
+*/
+if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+return;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
@ -3053,7 +3061,8 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
+/* Indicate we are in remove and not to run reset_task */
+set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@ -3088,8 +3097,6 @@ static void i40evf_remove(struct pci_dev *pdev)
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
-flush_scheduled_work();
i40evf_free_rss(adapter);
if (hw->aq.asq.count)

drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c

@ -1001,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
+bool link_up = vpe->event_data.link_event.link_status;
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
-if (adapter->link_up !=
-vpe->event_data.link_event.link_status) {
-adapter->link_up =
-vpe->event_data.link_event.link_status;
-if (adapter->link_up) {
-netif_tx_start_all_queues(netdev);
-netif_carrier_on(netdev);
-} else {
-netif_tx_stop_all_queues(netdev);
-netif_carrier_off(netdev);
-}
-i40evf_print_link_message(adapter);
+/* we've already got the right link status, bail */
+if (adapter->link_up == link_up)
+break;
+/* If we get link up message and start queues before
+* our queues are configured it will trigger a TX hang.
+* In that case, just ignore the link status message,
+* we'll get another one after we enable queues and
+* actually prepared to send traffic.
+*/
+if (link_up && adapter->state != __I40EVF_RUNNING)
+break;
+adapter->link_up = link_up;
+if (link_up) {
+netif_tx_start_all_queues(netdev);
+netif_carrier_on(netdev);
+} else {
+netif_tx_stop_all_queues(netdev);
+netif_carrier_off(netdev);
+}
+i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");