Diffstat (limited to 'drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c')
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 715
1 file changed, 686 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 14e9ebd3b73..44d547d78b8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -18,12 +18,21 @@
 
 #define QLC_BC_MSG		0
 #define QLC_BC_CFREE		1
+#define QLC_BC_FLR		2
 #define QLC_BC_HDR_SZ		16
 #define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)
 
 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF	2048
 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512
 
+#define QLC_83XX_VF_RESET_FAIL_THRESH	8
+#define QLC_BC_CMD_MAX_RETRY_CNT	5
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
 				  struct qlcnic_cmd_args *);
 
@@ -57,12 +66,13 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
 	.config_promisc_mode	= qlcnic_83xx_nic_set_promisc,
 	.change_l2_filter	= qlcnic_83xx_change_l2_filter,
 	.get_board_info		= qlcnic_83xx_get_port_info,
+	.free_mac_list		= qlcnic_sriov_vf_free_mac_list,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
 	.config_bridged_mode	= qlcnic_config_bridged_mode,
 	.config_led		= qlcnic_config_led,
-	.cancel_idc_work	= qlcnic_83xx_idc_exit,
+	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
 	.napi_add		= qlcnic_83xx_napi_add,
 	.napi_del		= qlcnic_83xx_napi_del,
 	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
@@ -72,6 +82,8 @@ static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
 	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
 	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
+	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
 };
 
 static inline bool qlcnic_sriov_bc_msg_check(u32 val)
@@ -84,6 +96,11 @@ static inline bool qlcnic_sriov_channel_free_check(u32 val)
 	return (val & (1 << QLC_BC_CFREE)) ? true : false;
 }
 
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+	return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
 static inline u8 qlcnic_sriov_target_func_id(u32 val)
 {
 	return (val >> 4) & 0xff;
@@ -169,6 +186,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 			goto qlcnic_destroy_async_wq;
 		}
 		sriov->vf_info[i].vp = vp;
+		vp->max_tx_bw = MAX_BW;
 		random_ether_addr(vp->mac);
 		dev_info(&adapter->pdev->dev,
 			 "MAC Address %pM is configured for VF %d\n",
@@ -192,10 +210,33 @@ qlcnic_free_sriov:
 	return err;
 }
 
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+	struct qlcnic_bc_trans *trans;
+	struct qlcnic_cmd_args cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&t_list->lock, flags);
+
+	while (!list_empty(&t_list->wait_list)) {
+		trans = list_first_entry(&t_list->wait_list,
+					 struct qlcnic_bc_trans, list);
+		list_del(&trans->list);
+		t_list->count--;
+		cmd.req.arg = (u32 *)trans->req_pay;
+		cmd.rsp.arg = (u32 *)trans->rsp_pay;
+		qlcnic_free_mbx_args(&cmd);
+		qlcnic_sriov_cleanup_transaction(trans);
+	}
+
+	spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
 void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 	struct qlcnic_back_channel *bc = &sriov->bc;
+	struct qlcnic_vf_info *vf;
 	int i;
 
 	if (!qlcnic_sriov_enable_check(adapter))
@@ -203,6 +244,14 @@ void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 
 	qlcnic_sriov_cleanup_async_list(bc);
 	destroy_workqueue(bc->bc_async_wq);
+
+	for (i = 0; i < sriov->num_vfs; i++) {
+		vf = &sriov->vf_info[i];
+		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+		cancel_work_sync(&vf->trans_work);
+		qlcnic_sriov_cleanup_list(&vf->rcv_act);
+	}
+
 	destroy_workqueue(bc->bc_trans_wq);
 
 	for (i = 0; i < sriov->num_vfs; i++)
@@ -286,7 +335,7 @@ poll:
 	/* Get the FW response data */
 	fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 	if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
-		qlcnic_83xx_process_aen(adapter);
+		__qlcnic_83xx_process_aen(adapter);
 		mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 		if (mbx_val)
 			goto poll;
@@ -335,16 +384,156 @@ static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
 	adapter->max_rds_rings = MAX_RDS_RINGS;
 }
 
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+				   struct qlcnic_info *npar_info, u16 vport_id)
+{
+	struct device *dev = &adapter->pdev->dev;
+	struct qlcnic_cmd_args cmd;
+	int err;
+	u32 status;
+
+	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+	if (err)
+		return err;
+
+	cmd.req.arg[1] = vport_id << 16 | 0x1;
+	err = qlcnic_issue_cmd(adapter, &cmd);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get vport info, err=%d\n", err);
+		qlcnic_free_mbx_args(&cmd);
+		return err;
+	}
+
+	status = cmd.rsp.arg[2] & 0xffff;
+	if (status & BIT_0)
+		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+	if (status & BIT_1)
+		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+	if (status & BIT_2)
+		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+	if (status & BIT_3)
+		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+	if (status & BIT_4)
+		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+	if (status & BIT_5)
+		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+	if (status & BIT_6)
+		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+	if (status & BIT_7)
+		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+	if (status & BIT_8)
+		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+	if (status & BIT_9)
+		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+		 npar_info->min_tx_bw, npar_info->max_tx_bw,
+		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+		 npar_info->max_rx_mcast_mac_filters,
+		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+		 npar_info->max_remote_ipv6_addrs);
+
+	qlcnic_free_mbx_args(&cmd);
+	return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+				      struct qlcnic_cmd_args *cmd)
+{
+	adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+	return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+					    struct qlcnic_cmd_args *cmd)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	int i, num_vlans;
+	u16 *vlans;
+
+	if (sriov->allowed_vlans)
+		return 0;
+
+	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+	if (!sriov->any_vlan)
+		return 0;
+
+	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+	num_vlans = sriov->num_allowed_vlans;
+	sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
+	if (!sriov->allowed_vlans)
+		return -ENOMEM;
+
+	vlans = (u16 *)&cmd->rsp.arg[3];
+	for (i = 0; i < num_vlans; i++)
+		sriov->allowed_vlans[i] = vlans[i];
+
+	return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_cmd_args cmd;
+	int ret;
+
+	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_issue_cmd(adapter, &cmd);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+			ret);
+	} else {
+		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+		switch (sriov->vlan_mode) {
+		case QLC_GUEST_VLAN_MODE:
+			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+			break;
+		case QLC_PVID_MODE:
+			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+			break;
+		}
+	}
+
+	qlcnic_free_mbx_args(&cmd);
+	return ret;
+}
+
 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_info nic_info;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int err;
 
+	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+	if (err)
+		return err;
+
 	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
 	if (err)
 		return -EIO;
 
+	err = qlcnic_sriov_get_vf_acl(adapter);
+	if (err)
+		return err;
+
 	if (qlcnic_83xx_get_port_info(adapter))
 		return -EIO;
@@ -404,6 +593,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 	pci_set_drvdata(adapter->pdev, adapter);
 	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
 		 adapter->netdev->name);
+	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+			     adapter->ahw->idc.delay);
 	return 0;
 
 err_out_send_channel_term:
@@ -423,27 +614,47 @@ err_out_disable_msi:
 	return err;
 }
 
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+	u32 state;
+
+	do {
+		msleep(20);
+		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+			return -EIO;
+		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+	} while (state != QLC_83XX_IDC_DEV_READY);
+
+	return 0;
+}
+
 int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int err;
 
 	spin_lock_init(&ahw->mbx_lock);
-	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+	set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+	ahw->reset_context = 0;
+	adapter->fw_fail_cnt = 0;
 	ahw->msix_supported = 1;
+	adapter->need_fw_reset = 0;
 	adapter->flags |= QLCNIC_TX_INTR_SHARED;
 
-	if (qlcnic_sriov_setup_vf(adapter, pci_using_dac))
-		return -EIO;
+	err = qlcnic_sriov_check_dev_ready(adapter);
+	if (err)
+		return err;
+
+	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+	if (err)
+		return err;
 
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
 
-	set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
-	adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
-	adapter->ahw->reset_context = 0;
-	adapter->fw_fail_cnt = 0;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	adapter->need_fw_reset = 0;
 	return 0;
 }
 
@@ -651,6 +862,10 @@ static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
 					 struct qlcnic_vf_info *vf,
 					 work_func_t func)
 {
+	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+	    vf->adapter->need_fw_reset)
+		return;
+
 	INIT_WORK(&vf->trans_work, func);
 	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
 }
@@ -768,10 +983,14 @@ static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
 				      struct qlcnic_vf_info *vf, u8 type)
 {
-	int err;
 	bool flag = true;
+	int err = -EIO;
 
 	while (flag) {
+		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+		    vf->adapter->need_fw_reset)
+			trans->trans_state = QLC_ABORT;
+
 		switch (trans->trans_state) {
 		case QLC_INIT:
 			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
@@ -853,6 +1072,12 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
 	struct qlcnic_cmd_args cmd;
 	u8 req;
 
+	if (adapter->need_fw_reset)
+		return;
+
+	if (test_bit(QLC_BC_VF_FLR, &vf->state))
+		return;
+
 	trans = list_first_entry(&vf->rcv_act.wait_list,
 				 struct qlcnic_bc_trans, list);
 	adapter = vf->adapter;
@@ -906,18 +1131,30 @@ clear_send:
 	clear_bit(QLC_BC_VF_SEND, &vf->state);
 }
 
-static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
-				     struct qlcnic_vf_info *vf,
-				     struct qlcnic_bc_trans *trans)
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+				struct qlcnic_vf_info *vf,
+				struct qlcnic_bc_trans *trans)
 {
 	struct qlcnic_trans_list *t_list = &vf->rcv_act;
 
-	spin_lock(&t_list->lock);
 	t_list->count++;
 	list_add_tail(&trans->list, &t_list->wait_list);
 	if (t_list->count == 1)
 		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
 					     qlcnic_sriov_process_bc_cmd);
+	return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+				     struct qlcnic_vf_info *vf,
+				     struct qlcnic_bc_trans *trans)
+{
+	struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+	spin_lock(&t_list->lock);
+
+	__qlcnic_sriov_add_act_list(sriov, vf, trans);
+
 	spin_unlock(&t_list->lock);
 	return 0;
 }
@@ -977,6 +1214,9 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
 	int err;
 	u8 cmd_op;
 
+	if (adapter->need_fw_reset)
+		return;
+
 	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
 	    hdr->op_type != QLC_BC_CMD &&
 	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
@@ -1019,6 +1259,10 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
 		trans->vf = vf;
 		trans->trans_id = hdr->seq_id;
 		trans->curr_req_frag++;
+
+		if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+			return;
+
 		if (trans->curr_req_frag == trans->req_hdr->num_frags) {
 			if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
 				qlcnic_free_mbx_args(&cmd);
@@ -1053,6 +1297,18 @@ static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
 	}
 }
 
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+					  struct qlcnic_vf_info *vf)
+{
+	struct qlcnic_adapter *adapter = vf->adapter;
+
+	if (qlcnic_sriov_pf_check(adapter))
+		qlcnic_sriov_pf_handle_flr(sriov, vf);
+	else
+		dev_err(&adapter->pdev->dev,
+			"Invalid event to VF. VF should not get FLR event\n");
+}
+
 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
 {
 	struct qlcnic_vf_info *vf;
@@ -1073,6 +1329,11 @@ void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
 	if (qlcnic_sriov_channel_free_check(event))
 		complete(&vf->ch_free_cmpl);
 
+	if (qlcnic_sriov_flr_check(event)) {
+		qlcnic_sriov_handle_flr_event(sriov, vf);
+		return;
+	}
+
 	if (qlcnic_sriov_bc_msg_check(event))
 		qlcnic_sriov_handle_msg_event(sriov, vf);
 }
@@ -1103,33 +1364,66 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
 	return err;
 }
 
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+				     struct qlcnic_bc_trans *trans)
+{
+	u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+	u32 state;
+
+	state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+	if (state == QLC_83XX_IDC_DEV_READY) {
+		msleep(20);
+		clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+		trans->trans_state = QLC_INIT;
+		if (++adapter->fw_fail_cnt > max)
+			return -EIO;
+		else
+			return 0;
+	}
+
+	return -EIO;
+}
+
 static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
 				  struct qlcnic_cmd_args *cmd)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct device *dev = &adapter->pdev->dev;
 	struct qlcnic_bc_trans *trans;
 	int err;
 	u32 rsp_data, opcode, mbx_err_code, rsp;
 	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+	u8 func = ahw->pci_func;
 
-	if (qlcnic_sriov_alloc_bc_trans(&trans))
-		return -ENOMEM;
+	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+	if (rsp)
+		return rsp;
 
-	if (qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND))
-		return -ENOMEM;
+	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+	if (rsp)
+		goto cleanup_transaction;
 
+retry:
 	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 		rsp = -EIO;
 		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
-		      QLCNIC_MBX_RSP(cmd->req.arg[0]), adapter->ahw->pci_func);
+		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
 		goto err_out;
 	}
 
-	err = qlcnic_sriov_send_bc_cmd(adapter, trans, adapter->ahw->pci_func);
+	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
 	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"MBX command 0x%x timed out for VF %d\n",
-			(cmd->req.arg[0] & 0xffff), adapter->ahw->pci_func);
+		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
 		rsp = QLCNIC_RCODE_TIMEOUT;
+
+		/* After adapter reset PF driver may take some time to
+		 * respond to VF's request. Retry request till maximum retries.
+		 */
+		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+			goto retry;
+
 		goto err_out;
 	}
 
@@ -1144,12 +1438,19 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
 		rsp = mbx_err_code;
 		if (!rsp)
 			rsp = 1;
-		dev_err(&adapter->pdev->dev,
+		dev_err(dev,
 			"MBX command 0x%x failed with err:0x%x for VF %d\n",
-			opcode, mbx_err_code, adapter->ahw->pci_func);
+			opcode, mbx_err_code, func);
 	}
 
 err_out:
+	if (rsp == QLCNIC_RCODE_TIMEOUT) {
+		ahw->reset_context = 1;
+		adapter->need_fw_reset = 1;
+		clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+	}
+
+cleanup_transaction:
 	qlcnic_sriov_cleanup_transaction(trans);
 	return rsp;
 }
@@ -1184,7 +1485,7 @@ out:
 	return ret;
 }
 
-void qlcnic_vf_add_mc_list(struct net_device *netdev)
+void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_mac_list_s *cur;
@@ -1204,7 +1505,7 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev)
 	while (!list_empty(&tmp_list)) {
 		cur = list_entry((&tmp_list)->next,
 				 struct qlcnic_mac_list_s, list);
-		qlcnic_nic_add_mac(adapter, cur->mac_addr);
+		qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
 		list_del(&cur->list);
 		kfree(cur);
 	}
@@ -1227,11 +1528,13 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	u16 vlan;
 
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
 
-	__qlcnic_set_multi(netdev);
+	vlan = adapter->ahw->sriov->vlan;
+	__qlcnic_set_multi(netdev, vlan);
 }
 
 static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1292,6 +1595,360 @@ void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
 
+	if (adapter->need_fw_reset)
+		return;
+
 	qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
 					    netdev);
 }
+
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+	qlcnic_83xx_enable_mbx_intrpt(adapter);
+
+	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+	if (err)
+		return err;
+
+	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+	if (err)
+		goto err_out_cleanup_bc_intr;
+
+	err = qlcnic_sriov_vf_init_driver(adapter);
+	if (err)
+		goto err_out_term_channel;
+
+	return 0;
+
+err_out_term_channel:
+	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+	qlcnic_sriov_cfg_bc_intr(adapter, 0);
+	return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (netif_running(netdev)) {
+		if (!qlcnic_up(adapter, netdev))
+			qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+	}
+
+	netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+	struct net_device *netdev = adapter->netdev;
+	u8 i, max_ints = ahw->num_msix - 1;
+
+	qlcnic_83xx_disable_mbx_intr(adapter);
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		qlcnic_down(adapter, netdev);
+
+	for (i = 0; i < max_ints; i++) {
+		intr_tbl[i].id = i;
+		intr_tbl[i].enabled = 0;
+		intr_tbl[i].src = 0;
+	}
+	ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct device *dev = &adapter->pdev->dev;
+	struct qlc_83xx_idc *idc = &ahw->idc;
+	u8 func = ahw->pci_func;
+	u32 state;
+
+	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+			qlcnic_sriov_vf_attach(adapter);
+			adapter->fw_fail_cnt = 0;
+			dev_info(dev,
+				 "%s: Reinitalization of VF 0x%x done after FW reset\n",
+				 __func__, func);
+		} else {
+			dev_err(dev,
+				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
+				__func__, func);
+			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+			dev_info(dev, "Current state 0x%x after FW reset\n",
+				 state);
+		}
+	}
+
+	return 0;
+}
+
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct device *dev = &adapter->pdev->dev;
+	struct qlc_83xx_idc *idc = &ahw->idc;
+	u8 func = ahw->pci_func;
+	u32 state;
+
+	adapter->reset_ctx_cnt++;
+
+	/* Skip the context reset and check if FW is hung */
+	if (adapter->reset_ctx_cnt < 3) {
+		adapter->need_fw_reset = 1;
+		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		dev_info(dev,
+			 "Resetting context, wait here to check if FW is in failed state\n");
+		return 0;
+	}
+
+	/* Check if number of resets exceed the threshold.
+	 * If it exceeds the threshold just fail the VF.
+	 */
+	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+		adapter->tx_timeo_cnt = 0;
+		adapter->fw_fail_cnt = 0;
+		adapter->reset_ctx_cnt = 0;
+		qlcnic_sriov_vf_detach(adapter);
+		dev_err(dev,
+			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
+		return -EIO;
+	}
+
+	dev_info(dev, "Resetting context of VF 0x%x\n", func);
+	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+		 __func__, adapter->reset_ctx_cnt, func);
+	set_bit(__QLCNIC_RESETTING, &adapter->state);
+	adapter->need_fw_reset = 1;
+	clear_bit(QLC_83XX_MBX_READY, &idc->status);
+	qlcnic_sriov_vf_detach(adapter);
+	adapter->need_fw_reset = 0;
+
+	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+		qlcnic_sriov_vf_attach(adapter);
+		adapter->netdev->trans_start = jiffies;
+		adapter->tx_timeo_cnt = 0;
+		adapter->reset_ctx_cnt = 0;
+		adapter->fw_fail_cnt = 0;
+		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+	} else {
+		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+			__func__, func);
+		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+	}
+
+	return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int ret = 0;
+
+	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+	else if (ahw->reset_context)
+		ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+		qlcnic_sriov_vf_detach(adapter);
+
+	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+		set_bit(__QLCNIC_RESETTING, &adapter->state);
+		adapter->tx_timeo_cnt = 0;
+		adapter->reset_ctx_cnt = 0;
+		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		qlcnic_sriov_vf_detach(adapter);
+	}
+
+	return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+	u8 func = adapter->ahw->pci_func;
+
+	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+		dev_err(&adapter->pdev->dev,
+			"Firmware hang detected by VF 0x%x\n", func);
+		set_bit(__QLCNIC_RESETTING, &adapter->state);
+		adapter->tx_timeo_cnt = 0;
+		adapter->reset_ctx_cnt = 0;
+		clear_bit(QLC_83XX_MBX_READY, &idc->status);
+		qlcnic_sriov_vf_detach(adapter);
+	}
+	return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+	return 0;
+}
+
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+	struct qlcnic_adapter *adapter;
+	struct qlc_83xx_idc *idc;
+	int ret = 0;
+
+	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+	idc = &adapter->ahw->idc;
+	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+	switch (idc->curr_state) {
+	case QLC_83XX_IDC_DEV_READY:
+		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+		break;
+	case QLC_83XX_IDC_DEV_NEED_RESET:
+	case QLC_83XX_IDC_DEV_INIT:
+		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+		break;
+	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+		break;
+	case QLC_83XX_IDC_DEV_FAILED:
+		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+		break;
+	case QLC_83XX_IDC_DEV_QUISCENT:
+		break;
+	default:
+		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+	}
+
+	idc->prev_state = idc->curr_state;
+	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+				     idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+		msleep(20);
+
+	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+					  u16 vid, u8 enable)
+{
+	u16 vlan = sriov->vlan;
+	u8 allowed = 0;
+	int i;
+
+	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+		return -EINVAL;
+
+	if (enable) {
+		if (vlan)
+			return -EINVAL;
+
+		if (sriov->any_vlan) {
+			for (i = 0; i < sriov->num_allowed_vlans; i++) {
+				if (sriov->allowed_vlans[i] == vid)
+					allowed = 1;
+			}
+
+			if (!allowed)
+				return -EINVAL;
+		}
+	} else {
+		if (!vlan || vlan != vid)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+				   u16 vid, u8 enable)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_cmd_args cmd;
+	int ret;
+
+	if (vid == 0)
+		return 0;
+
+	ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+	if (ret)
+		return ret;
+
+	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+	if (ret)
+		return ret;
+
+	cmd.req.arg[1] = (enable & 1) | vid << 16;
+
+	qlcnic_sriov_cleanup_async_list(&sriov->bc);
+	ret = qlcnic_issue_cmd(adapter, &cmd);
+	if (ret) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure guest VLAN, err=%d\n", ret);
+	} else {
+		qlcnic_free_mac_list(adapter);
+
+		if (enable)
+			sriov->vlan = vid;
+		else
+			sriov->vlan = 0;
+
+		qlcnic_sriov_vf_set_multi(adapter->netdev);
+	}
+
+	qlcnic_free_mbx_args(&cmd);
+	return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+	struct list_head *head = &adapter->mac_list;
+	struct qlcnic_mac_list_s *cur;
+	u16 vlan;
+
+	vlan = adapter->ahw->sriov->vlan;
+
+	while (!list_empty(head)) {
+		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+		qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+					  vlan, QLCNIC_MAC_DEL);
+		list_del(&cur->list);
+		kfree(cur);
+	}
+}