FreeBSD Bugzilla – Attachment 132077 Details for Bug 176281
[ixgbe] [patch] Update ixgbe to 2.4.10 (latest official driver)
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
file.diff
file.diff (text/plain), 52.66 KB, created by
Marcelo Araujo
on 2013-02-20 04:50:00 UTC
(
hide
)
Description:
file.diff
Filename:
MIME Type:
Creator:
Marcelo Araujo
Created:
2013-02-20 04:50:00 UTC
Size:
52.66 KB
patch
obsolete
>Index: ixgbe_vf.h >=================================================================== >--- ixgbe_vf.h (revision 247019) >+++ ixgbe_vf.h (working copy) >@@ -39,6 +39,9 @@ > #define IXGBE_VF_MAX_TX_QUEUES 8 > #define IXGBE_VF_MAX_RX_QUEUES 8 > >+/* DCB define */ >+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 >+ > #define IXGBE_VFCTRL 0x00000 > #define IXGBE_VFSTATUS 0x00008 > #define IXGBE_VFLINKS 0x00010 >@@ -127,5 +130,9 @@ > u32 mc_addr_count, ixgbe_mc_addr_itr, > bool clear); > s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); >+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); >+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); >+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, >+ unsigned int *default_tc); > > #endif /* __IXGBE_VF_H__ */ >Index: ixgbe_common.c >=================================================================== >--- ixgbe_common.c (revision 247019) >+++ ixgbe_common.c (working copy) >@@ -174,6 +174,7 @@ > s32 ret_val = IXGBE_SUCCESS; > u32 reg = 0, reg_bp = 0; > u16 reg_cu = 0; >+ bool got_lock = FALSE; > > DEBUGFUNC("ixgbe_setup_fc"); > >@@ -297,7 +298,28 @@ > */ > if (hw->phy.media_type == ixgbe_media_type_backplane) { > reg_bp |= IXGBE_AUTOC_AN_RESTART; >+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and >+ * LESM is on, likewise reset_pipeline requries the lock as >+ * it also writes AUTOC. 
>+ */ >+ if ((hw->mac.type == ixgbe_mac_82599EB) && >+ ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ ret_val = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (ret_val != IXGBE_SUCCESS) { >+ ret_val = IXGBE_ERR_SWFW_SYNC; >+ goto out; >+ } >+ got_lock = TRUE; >+ } >+ > IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); >+ if (hw->mac.type == ixgbe_mac_82599EB) >+ ixgbe_reset_pipeline_82599(hw); >+ >+ if (got_lock) >+ hw->mac.ops.release_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); > } else if ((hw->phy.media_type == ixgbe_media_type_copper) && > (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) { > hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, >@@ -680,6 +702,195 @@ > } > > /** >+ * ixgbe_read_pba_raw >+ * @hw: pointer to the HW structure >+ * @eeprom_buf: optional pointer to EEPROM image >+ * @eeprom_buf_size: size of EEPROM image in words >+ * @max_pba_block_size: PBA block size limit >+ * @pba: pointer to output PBA structure >+ * >+ * Reads PBA from EEPROM image when eeprom_buf is not NULL. >+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
>+ * >+ **/ >+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, u16 max_pba_block_size, >+ struct ixgbe_pba *pba) >+{ >+ s32 ret_val; >+ u16 pba_block_size; >+ >+ if (pba == NULL) >+ return IXGBE_ERR_PARAM; >+ >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, >+ &pba->word[0]); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { >+ pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; >+ pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; >+ } else { >+ return IXGBE_ERR_PARAM; >+ } >+ } >+ >+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { >+ if (pba->pba_block == NULL) >+ return IXGBE_ERR_PARAM; >+ >+ ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, >+ eeprom_buf_size, >+ &pba_block_size); >+ if (ret_val) >+ return ret_val; >+ >+ if (pba_block_size > max_pba_block_size) >+ return IXGBE_ERR_PARAM; >+ >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], >+ pba_block_size, >+ pba->pba_block); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > (u32)(pba->word[1] + >+ pba->pba_block[0])) { >+ memcpy(pba->pba_block, >+ &eeprom_buf[pba->word[1]], >+ pba_block_size * sizeof(u16)); >+ } else { >+ return IXGBE_ERR_PARAM; >+ } >+ } >+ } >+ >+ return IXGBE_SUCCESS; >+} >+ >+/** >+ * ixgbe_write_pba_raw >+ * @hw: pointer to the HW structure >+ * @eeprom_buf: optional pointer to EEPROM image >+ * @eeprom_buf_size: size of EEPROM image in words >+ * @pba: pointer to PBA structure >+ * >+ * Writes PBA to EEPROM image when eeprom_buf is not NULL. >+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
>+ * >+ **/ >+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, struct ixgbe_pba *pba) >+{ >+ s32 ret_val; >+ >+ if (pba == NULL) >+ return IXGBE_ERR_PARAM; >+ >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, >+ &pba->word[0]); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { >+ eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; >+ eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; >+ } else { >+ return IXGBE_ERR_PARAM; >+ } >+ } >+ >+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { >+ if (pba->pba_block == NULL) >+ return IXGBE_ERR_PARAM; >+ >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], >+ pba->pba_block[0], >+ pba->pba_block); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > (u32)(pba->word[1] + >+ pba->pba_block[0])) { >+ memcpy(&eeprom_buf[pba->word[1]], >+ pba->pba_block, >+ pba->pba_block[0] * sizeof(u16)); >+ } else { >+ return IXGBE_ERR_PARAM; >+ } >+ } >+ } >+ >+ return IXGBE_SUCCESS; >+} >+ >+/** >+ * ixgbe_get_pba_block_size >+ * @hw: pointer to the HW structure >+ * @eeprom_buf: optional pointer to EEPROM image >+ * @eeprom_buf_size: size of EEPROM image in words >+ * @pba_data_size: pointer to output variable >+ * >+ * Returns the size of the PBA block in words. Function operates on EEPROM >+ * image if the eeprom_buf pointer is not NULL otherwise it accesses physical >+ * EEPROM device. 
>+ * >+ **/ >+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, u16 *pba_block_size) >+{ >+ s32 ret_val; >+ u16 pba_word[2]; >+ u16 length; >+ >+ DEBUGFUNC("ixgbe_get_pba_block_size"); >+ >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, >+ &pba_word[0]); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { >+ pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; >+ pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; >+ } else { >+ return IXGBE_ERR_PARAM; >+ } >+ } >+ >+ if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) { >+ if (eeprom_buf == NULL) { >+ ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0, >+ &length); >+ if (ret_val) >+ return ret_val; >+ } else { >+ if (eeprom_buf_size > pba_word[1]) >+ length = eeprom_buf[pba_word[1] + 0]; >+ else >+ return IXGBE_ERR_PARAM; >+ } >+ >+ if (length == 0xFFFF || length == 0) >+ return IXGBE_ERR_PBA_SECTION; >+ } else { >+ /* PBA number in legacy format, there is no PBA Block. */ >+ length = 0; >+ } >+ >+ if (pba_block_size != NULL) >+ *pba_block_size = length; >+ >+ return IXGBE_SUCCESS; >+} >+ >+/** > * ixgbe_get_mac_addr_generic - Generic get MAC address > * @hw: pointer to hardware structure > * @mac_addr: Adapter MAC address >@@ -2965,6 +3176,7 @@ > bool link_up = 0; > u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); > u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); >+ s32 ret_val = IXGBE_SUCCESS; > > DEBUGFUNC("ixgbe_blink_led_start_generic"); > >@@ -2975,10 +3187,29 @@ > hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); > > if (!link_up) { >+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and >+ * LESM is on. 
>+ */ >+ bool got_lock = FALSE; >+ if ((hw->mac.type == ixgbe_mac_82599EB) && >+ ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ ret_val = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (ret_val != IXGBE_SUCCESS) { >+ ret_val = IXGBE_ERR_SWFW_SYNC; >+ goto out; >+ } >+ got_lock = TRUE; >+ } >+ > autoc_reg |= IXGBE_AUTOC_AN_RESTART; > autoc_reg |= IXGBE_AUTOC_FLU; > IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); > IXGBE_WRITE_FLUSH(hw); >+ >+ if (got_lock) >+ hw->mac.ops.release_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); > msec_delay(10); > } > >@@ -2987,7 +3218,8 @@ > IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); > IXGBE_WRITE_FLUSH(hw); > >- return IXGBE_SUCCESS; >+out: >+ return ret_val; > } > > /** >@@ -2999,21 +3231,43 @@ > { > u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); > u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); >+ s32 ret_val = IXGBE_SUCCESS; >+ bool got_lock = FALSE; > > DEBUGFUNC("ixgbe_blink_led_stop_generic"); >+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and >+ * LESM is on. 
>+ */ >+ if ((hw->mac.type == ixgbe_mac_82599EB) && >+ ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ ret_val = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (ret_val != IXGBE_SUCCESS) { >+ ret_val = IXGBE_ERR_SWFW_SYNC; >+ goto out; >+ } >+ got_lock = TRUE; >+ } > > > autoc_reg &= ~IXGBE_AUTOC_FLU; > autoc_reg |= IXGBE_AUTOC_AN_RESTART; > IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); > >+ if (hw->mac.type == ixgbe_mac_82599EB) >+ ixgbe_reset_pipeline_82599(hw); >+ >+ if (got_lock) >+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); >+ > led_reg &= ~IXGBE_LED_MODE_MASK(index); > led_reg &= ~IXGBE_LED_BLINK(index); > led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); > IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); > IXGBE_WRITE_FLUSH(hw); > >- return IXGBE_SUCCESS; >+out: >+ return ret_val; > } > > /** >Index: ixgbe_common.h >=================================================================== >--- ixgbe_common.h (revision 247019) >+++ ixgbe_common.h (working copy) >@@ -41,6 +41,12 @@ > IXGBE_WRITE_REG(hw, reg, (u32) value); \ > IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \ > } while (0) >+#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW) >+struct ixgbe_pba { >+ u16 word[2]; >+ u16 *pba_block; >+}; >+#endif > > u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); > >@@ -52,6 +58,13 @@ > s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); > s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, > u32 pba_num_size); >+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, u16 max_pba_block_size, >+ struct ixgbe_pba *pba); >+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, struct ixgbe_pba *pba); >+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, >+ u32 eeprom_buf_size, u16 *pba_block_size); > s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); > s32 
ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); > void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); >@@ -138,4 +151,7 @@ > s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, > u8 build, u8 ver); > void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); >+ >+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); >+ > #endif /* IXGBE_COMMON */ >Index: ixgbe_82599.c >=================================================================== >--- ixgbe_82599.c (revision 247019) >+++ ixgbe_82599.c (working copy) >@@ -135,9 +135,8 @@ > s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) > { > s32 ret_val = IXGBE_SUCCESS; >- u32 reg_anlp1 = 0; >- u32 i = 0; > u16 list_offset, data_offset, data_value; >+ bool got_lock = FALSE; > > DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); > >@@ -171,28 +170,39 @@ > /* Delay obtaining semaphore again to allow FW access */ > msec_delay(hw->eeprom.semaphore_delay); > >- /* Now restart DSP by setting Restart_AN and clearing LMS */ >- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, >- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | >- IXGBE_AUTOC_AN_RESTART)); >+ /* Need SW/FW semaphore around AUTOC writes if LESM on, >+ * likewise reset_pipeline requires lock as it also writes >+ * AUTOC. 
>+ */ >+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ ret_val = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (ret_val != IXGBE_SUCCESS) { >+ ret_val = IXGBE_ERR_SWFW_SYNC; >+ goto setup_sfp_out; >+ } > >- /* Wait for AN to leave state 0 */ >- for (i = 0; i < 10; i++) { >- msec_delay(4); >- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); >- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) >- break; >+ got_lock = TRUE; > } >- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { >+ >+ /* Restart DSP and set SFI mode */ >+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, >+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL)); >+ >+ ret_val = ixgbe_reset_pipeline_82599(hw); >+ >+ if (got_lock) { >+ hw->mac.ops.release_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ got_lock = FALSE; >+ } >+ >+ if (ret_val) { > DEBUGOUT("sfp module setup not complete\n"); > ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; > goto setup_sfp_out; > } > >- /* Restart DSP by setting Restart_AN and return to SFI mode */ >- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, >- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | >- IXGBE_AUTOC_AN_RESTART)); > } > > setup_sfp_out: >@@ -424,6 +434,7 @@ > case IXGBE_DEV_ID_82599_SFP_FCOE: > case IXGBE_DEV_ID_82599_SFP_EM: > case IXGBE_DEV_ID_82599_SFP_SF2: >+ case IXGBE_DEV_ID_82599_SFP_SF_QP: > case IXGBE_DEV_ID_82599EN_SFP: > media_type = ixgbe_media_type_fiber; > break; >@@ -456,17 +467,32 @@ > u32 links_reg; > u32 i; > s32 status = IXGBE_SUCCESS; >+ bool got_lock = FALSE; > > DEBUGFUNC("ixgbe_start_mac_link_82599"); > > >+ /* reset_pipeline requires us to hold this lock as it writes to >+ * AUTOC. 
>+ */ >+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ status = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (status != IXGBE_SUCCESS) >+ goto out; >+ >+ got_lock = TRUE; >+ } >+ > /* Restart link */ >- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); >- autoc_reg |= IXGBE_AUTOC_AN_RESTART; >- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); >+ ixgbe_reset_pipeline_82599(hw); > >+ if (got_lock) >+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); >+ > /* Only poll for autoneg to complete if specified to do so */ > if (autoneg_wait_to_complete) { >+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); > if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == > IXGBE_AUTOC_LMS_KX4_KX_KR || > (autoc_reg & IXGBE_AUTOC_LMS_MASK) == >@@ -490,6 +516,7 @@ > /* Add delay to filter out noises during initial link setup */ > msec_delay(50); > >+out: > return status; > } > >@@ -842,12 +869,13 @@ > u32 links_reg; > u32 i; > ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; >+ bool got_lock = FALSE; > > DEBUGFUNC("ixgbe_setup_mac_link_82599"); > > /* Check to see if speed passed in is supported. 
*/ > status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); >- if (status != IXGBE_SUCCESS) >+ if (status) > goto out; > > speed &= link_capabilities; >@@ -868,12 +896,13 @@ > link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { > /* Set KX4/KX/KR support according to speed requested */ > autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); >- if (speed & IXGBE_LINK_SPEED_10GB_FULL) >+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { > if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) > autoc |= IXGBE_AUTOC_KX4_SUPP; > if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && > (hw->phy.smart_speed_active == FALSE)) > autoc |= IXGBE_AUTOC_KR_SUPP; >+ } > if (speed & IXGBE_LINK_SPEED_1GB_FULL) > autoc |= IXGBE_AUTOC_KX_SUPP; > } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && >@@ -899,10 +928,31 @@ > } > > if (autoc != start_autoc) { >+ /* Need SW/FW semaphore around AUTOC writes if LESM is on, >+ * likewise reset_pipeline requires us to hold this lock as >+ * it also writes to AUTOC. >+ */ >+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ status = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (status != IXGBE_SUCCESS) { >+ status = IXGBE_ERR_SWFW_SYNC; >+ goto out; >+ } >+ >+ got_lock = TRUE; >+ } >+ > /* Restart link */ >- autoc |= IXGBE_AUTOC_AN_RESTART; > IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); >+ ixgbe_reset_pipeline_82599(hw); > >+ if (got_lock) { >+ hw->mac.ops.release_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ got_lock = FALSE; >+ } >+ > /* Only poll for autoneg to complete if specified to do so */ > if (autoneg_wait_to_complete) { > if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || >@@ -1060,10 +1110,31 @@ > hw->mac.orig_autoc2 = autoc2; > hw->mac.orig_link_settings_stored = TRUE; > } else { >- if (autoc != hw->mac.orig_autoc) >- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | >- IXGBE_AUTOC_AN_RESTART)); >+ if (autoc != hw->mac.orig_autoc) { >+ /* Need SW/FW semaphore around AUTOC writes if LESM is >+ * on, likewise reset_pipeline requires 
us to hold >+ * this lock as it also writes to AUTOC. >+ */ >+ bool got_lock = FALSE; >+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { >+ status = hw->mac.ops.acquire_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ if (status != IXGBE_SUCCESS) { >+ status = IXGBE_ERR_SWFW_SYNC; >+ goto reset_hw_out; >+ } > >+ got_lock = TRUE; >+ } >+ >+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); >+ ixgbe_reset_pipeline_82599(hw); >+ >+ if (got_lock) >+ hw->mac.ops.release_swfw_sync(hw, >+ IXGBE_GSSR_MAC_CSR_SM); >+ } >+ > if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != > (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { > autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; >@@ -1167,7 +1238,7 @@ > if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & > IXGBE_FDIRCTRL_INIT_DONE) > break; >- usec_delay(10); >+ msec_delay(1); > } > if (i >= IXGBE_FDIR_INIT_DONE_POLL) { > DEBUGOUT("Flow Director Signature poll time exceeded!\n"); >@@ -2093,7 +2164,7 @@ > * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or > * if the FW version is not supported. 
> **/ >-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) >+s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) > { > s32 status = IXGBE_ERR_EEPROM_VERSION; > u16 fw_offset, fw_ptp_cfg_offset; >@@ -2242,4 +2313,46 @@ > return ret_val; > } > >+/** >+ * ixgbe_reset_pipeline_82599 - perform pipeline reset >+ * >+ * @hw: pointer to hardware structure >+ * >+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure >+ * full pipeline reset >+ **/ >+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) >+{ >+ s32 i, autoc_reg, ret_val; >+ s32 anlp1_reg = 0; > >+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); >+ autoc_reg |= IXGBE_AUTOC_AN_RESTART; >+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ >+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); >+ /* Wait for AN to leave state 0 */ >+ for (i = 0; i < 10; i++) { >+ msec_delay(4); >+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); >+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) >+ break; >+ } >+ >+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { >+ DEBUGOUT("auto negotiation not completed\n"); >+ ret_val = IXGBE_ERR_RESET_FAILED; >+ goto reset_pipeline_out; >+ } >+ >+ ret_val = IXGBE_SUCCESS; >+ >+reset_pipeline_out: >+ /* Write AUTOC register with original LMS field and Restart_AN */ >+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); >+ IXGBE_WRITE_FLUSH(hw); >+ >+ return ret_val; >+} >+ >+ >+ >Index: ixgbe_82599.h >=================================================================== >--- ixgbe_82599.h (revision 247019) >+++ ixgbe_82599.h (working copy) >@@ -61,5 +61,4 @@ > s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); > u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); > s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); >-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); > #endif /* _IXGBE_82599_H_ */ >Index: ixgbe_api.c >=================================================================== >--- ixgbe_api.c 
(revision 247019) >+++ ixgbe_api.c (working copy) >@@ -93,53 +93,50 @@ > > DEBUGFUNC("ixgbe_set_mac_type\n"); > >- if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) { >- switch (hw->device_id) { >- case IXGBE_DEV_ID_82598: >- case IXGBE_DEV_ID_82598_BX: >- case IXGBE_DEV_ID_82598AF_SINGLE_PORT: >- case IXGBE_DEV_ID_82598AF_DUAL_PORT: >- case IXGBE_DEV_ID_82598AT: >- case IXGBE_DEV_ID_82598AT2: >- case IXGBE_DEV_ID_82598EB_CX4: >- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: >- case IXGBE_DEV_ID_82598_DA_DUAL_PORT: >- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: >- case IXGBE_DEV_ID_82598EB_XF_LR: >- case IXGBE_DEV_ID_82598EB_SFP_LOM: >- hw->mac.type = ixgbe_mac_82598EB; >- break; >- case IXGBE_DEV_ID_82599_KX4: >- case IXGBE_DEV_ID_82599_KX4_MEZZ: >- case IXGBE_DEV_ID_82599_XAUI_LOM: >- case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: >- case IXGBE_DEV_ID_82599_KR: >- case IXGBE_DEV_ID_82599_SFP: >- case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: >- case IXGBE_DEV_ID_82599_SFP_FCOE: >- case IXGBE_DEV_ID_82599_SFP_EM: >- case IXGBE_DEV_ID_82599_SFP_SF2: >- case IXGBE_DEV_ID_82599EN_SFP: >- case IXGBE_DEV_ID_82599_CX4: >- case IXGBE_DEV_ID_82599_T3_LOM: >- hw->mac.type = ixgbe_mac_82599EB; >- break; >- case IXGBE_DEV_ID_82599_VF: >- hw->mac.type = ixgbe_mac_82599_vf; >- break; >- case IXGBE_DEV_ID_X540_VF: >- hw->mac.type = ixgbe_mac_X540_vf; >- break; >- case IXGBE_DEV_ID_X540T: >- case IXGBE_DEV_ID_X540T1: >- hw->mac.type = ixgbe_mac_X540; >- break; >- default: >- ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; >- break; >- } >- } else { >+ switch (hw->device_id) { >+ case IXGBE_DEV_ID_82598: >+ case IXGBE_DEV_ID_82598_BX: >+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT: >+ case IXGBE_DEV_ID_82598AF_DUAL_PORT: >+ case IXGBE_DEV_ID_82598AT: >+ case IXGBE_DEV_ID_82598AT2: >+ case IXGBE_DEV_ID_82598EB_CX4: >+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: >+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT: >+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: >+ case IXGBE_DEV_ID_82598EB_XF_LR: >+ case IXGBE_DEV_ID_82598EB_SFP_LOM: >+ 
hw->mac.type = ixgbe_mac_82598EB; >+ break; >+ case IXGBE_DEV_ID_82599_KX4: >+ case IXGBE_DEV_ID_82599_KX4_MEZZ: >+ case IXGBE_DEV_ID_82599_XAUI_LOM: >+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: >+ case IXGBE_DEV_ID_82599_KR: >+ case IXGBE_DEV_ID_82599_SFP: >+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: >+ case IXGBE_DEV_ID_82599_SFP_FCOE: >+ case IXGBE_DEV_ID_82599_SFP_EM: >+ case IXGBE_DEV_ID_82599_SFP_SF2: >+ case IXGBE_DEV_ID_82599_SFP_SF_QP: >+ case IXGBE_DEV_ID_82599EN_SFP: >+ case IXGBE_DEV_ID_82599_CX4: >+ case IXGBE_DEV_ID_82599_T3_LOM: >+ hw->mac.type = ixgbe_mac_82599EB; >+ break; >+ case IXGBE_DEV_ID_82599_VF: >+ hw->mac.type = ixgbe_mac_82599_vf; >+ break; >+ case IXGBE_DEV_ID_X540_VF: >+ hw->mac.type = ixgbe_mac_X540_vf; >+ break; >+ case IXGBE_DEV_ID_X540T: >+ case IXGBE_DEV_ID_X540T1: >+ hw->mac.type = ixgbe_mac_X540; >+ break; >+ default: > ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; >+ break; > } > > DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", >Index: ixgbe_api.h >=================================================================== >--- ixgbe_api.h (revision 247019) >+++ ixgbe_api.h (working copy) >@@ -159,6 +159,7 @@ > union ixgbe_atr_input *mask); > u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, > union ixgbe_atr_hash_dword common); >+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); > s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, > u8 *data); > s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, >Index: ixgbe.c >=================================================================== >--- ixgbe.c (revision 247019) >+++ ixgbe.c (working copy) >@@ -32,6 +32,7 @@ > ******************************************************************************/ > /*$FreeBSD$*/ > >+ > #ifdef HAVE_KERNEL_OPTION_HEADERS > #include "opt_inet.h" > #include "opt_inet6.h" >@@ -47,7 +48,7 @@ > /********************************************************************* > * Driver version > 
*********************************************************************/ >-char ixgbe_driver_version[] = "2.4.8"; >+char ixgbe_driver_version[] = "2.4.10"; > > /********************************************************************* > * PCI Device ID Table >@@ -83,6 +84,7 @@ > {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, > {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, > {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, >+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, > {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, > {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, > /* required last entry */ >@@ -104,13 +106,15 @@ > static int ixgbe_attach(device_t); > static int ixgbe_detach(device_t); > static int ixgbe_shutdown(device_t); >-static void ixgbe_start(struct ifnet *); >-static void ixgbe_start_locked(struct tx_ring *, struct ifnet *); > #if __FreeBSD_version >= 800000 > static int ixgbe_mq_start(struct ifnet *, struct mbuf *); > static int ixgbe_mq_start_locked(struct ifnet *, > struct tx_ring *, struct mbuf *); > static void ixgbe_qflush(struct ifnet *); >+static void ixgbe_deferred_mq_start(void *, int); >+#else >+static void ixgbe_start(struct ifnet *); >+static void ixgbe_start_locked(struct tx_ring *, struct ifnet *); > #endif > static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t); > static void ixgbe_init(void *); >@@ -164,7 +168,7 @@ > static void ixgbe_add_rx_process_limit(struct adapter *, const char *, > const char *, int *, int); > static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *); >-static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *); >+static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *); > static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); > static void ixgbe_configure_ivars(struct adapter *); > static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); >@@ -536,7 +540,6 @@ > case IXGBE_ERR_SFP_NOT_SUPPORTED: > 
device_printf(dev,"Unsupported SFP+ Module\n"); > error = EIO; >- device_printf(dev,"Hardware Initialization Failure\n"); > goto err_late; > case IXGBE_ERR_SFP_NOT_PRESENT: > device_printf(dev,"No SFP+ Module found\n"); >@@ -631,6 +634,7 @@ > { > struct adapter *adapter = device_get_softc(dev); > struct ix_queue *que = adapter->queues; >+ struct tx_ring *txr = adapter->tx_rings; > u32 ctrl_ext; > > INIT_DEBUGOUT("ixgbe_detach: begin"); >@@ -645,8 +649,11 @@ > ixgbe_stop(adapter); > IXGBE_CORE_UNLOCK(adapter); > >- for (int i = 0; i < adapter->num_queues; i++, que++) { >+ for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { > if (que->tq) { >+#if __FreeBSD_version >= 800000 >+ taskqueue_drain(que->tq, &txr->txq_task); >+#endif > taskqueue_drain(que->tq, &que->que_task); > taskqueue_free(que->tq); > } >@@ -708,6 +715,7 @@ > } > > >+#if __FreeBSD_version < 800000 > /********************************************************************* > * Transmit entry point > * >@@ -726,15 +734,13 @@ > > IXGBE_TX_LOCK_ASSERT(txr); > >- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != >- IFF_DRV_RUNNING) >+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) > return; > if (!adapter->link_active) > return; > > while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { >- if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) { >- txr->queue_status |= IXGBE_QUEUE_DEPLETED; >+ if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) > break; > } > >@@ -745,8 +751,6 @@ > if (ixgbe_xmit(txr, &m_head)) { > if (m_head != NULL) > IFQ_DRV_PREPEND(&ifp->if_snd, m_head); >- if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) >- txr->queue_status |= IXGBE_QUEUE_DEPLETED; > break; > } > /* Send a copy of the frame to the BPF listener */ >@@ -779,7 +783,7 @@ > return; > } > >-#if __FreeBSD_version >= 800000 >+#else /* __FreeBSD_version >= 800000 */ > /* > ** Multiqueue Transmit driver > ** >@@ -795,19 +799,16 @@ > /* Which queue to use */ > if ((m->m_flags & M_FLOWID) != 0) > i = m->m_pkthdr.flowid % adapter->num_queues; >- else 
>- i = curcpu % adapter->num_queues; > > txr = &adapter->tx_rings[i]; > que = &adapter->queues[i]; > >- if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) && >- IXGBE_TX_TRYLOCK(txr)) { >+ if (IXGBE_TX_TRYLOCK(txr)) { > err = ixgbe_mq_start_locked(ifp, txr, m); > IXGBE_TX_UNLOCK(txr); > } else { > err = drbr_enqueue(ifp, txr->br, m); >- taskqueue_enqueue(que->tq, &que->que_task); >+ taskqueue_enqueue(que->tq, &txr->txq_task); > } > > return (err); >@@ -821,7 +822,6 @@ > int enqueued, err = 0; > > if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || >- (txr->queue_status == IXGBE_QUEUE_DEPLETED) || > adapter->link_active == 0) { > if (m != NULL) > err = drbr_enqueue(ifp, txr->br, m); >@@ -851,12 +851,6 @@ > ETHER_BPF_MTAP(ifp, next); > if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) > break; >- if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) >- ixgbe_txeof(txr); >- if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) { >- txr->queue_status |= IXGBE_QUEUE_DEPLETED; >- break; >- } > next = drbr_dequeue(ifp, txr->br); > } > >@@ -873,6 +867,22 @@ > } > > /* >+ * Called from a taskqueue to drain queued tx packets >+ */ >+static void >+ixgbe_deferred_mq_start(void *arg, int pending) >+{ >+ struct tx_ring *txr = arg; >+ struct adapter *adapter = txr->adapter; >+ struct ifnet *ifp = adapter->ifp; >+ >+ IXGBE_TX_LOCK(txr); >+ if (!drbr_empty(ifp, txr->br)) >+ ixgbe_mq_start_locked(ifp, txr, NULL); >+ IXGBE_TX_UNLOCK(txr); >+} >+ >+/* > ** Flush all ring buffers > */ > static void >@@ -992,6 +1002,10 @@ > ifp->if_capenable ^= IFCAP_HWCSUM; > if (mask & IFCAP_TSO4) > ifp->if_capenable ^= IFCAP_TSO4; >+#if __FreeBSD_version >= 900505 >+ if (mask & IFCAP_TSO6) >+ ifp->if_capenable ^= IFCAP_TSO6; >+#endif > if (mask & IFCAP_LRO) > ifp->if_capenable ^= IFCAP_LRO; > if (mask & IFCAP_VLAN_HWTAGGING) >@@ -1056,7 +1070,7 @@ > > /* Set the various hardware offload abilities */ > ifp->if_hwassist = 0; >- if (ifp->if_capenable & IFCAP_TSO4) >+ if (ifp->if_capenable & IFCAP_TSO) > ifp->if_hwassist |= 
CSUM_TSO; > if (ifp->if_capenable & IFCAP_TXCSUM) { > ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); >@@ -1145,7 +1159,7 @@ > * from the Intel linux driver 3.8.21. > * Prefetching enables tx line rate even with 1 queue. > */ >- txdctl |= (16 << 0) | (1 << 8); >+ txdctl |= (32 << 0) | (1 << 8); > IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); > } > >@@ -1390,7 +1404,7 @@ > ixgbe_start_locked(txr, ifp); > #endif > IXGBE_TX_UNLOCK(txr); >- if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { >+ if (more) { > taskqueue_enqueue(que->tq, &que->que_task); > return; > } >@@ -1648,7 +1662,7 @@ > ifmr->ifm_active |= IFM_100_TX | IFM_FDX; > break; > case IXGBE_LINK_SPEED_1GB_FULL: >- ifmr->ifm_active |= IFM_1000_T | IFM_FDX; >+ ifmr->ifm_active |= adapter->optics | IFM_FDX; > break; > case IXGBE_LINK_SPEED_10GB_FULL: > ifmr->ifm_active |= adapter->optics | IFM_FDX; >@@ -1776,10 +1790,10 @@ > } > > /* Make certain there are enough descriptors */ >- if (nsegs > txr->tx_avail - 2) { >+ if ((nsegs + 1) > txr->tx_avail - 2) { > txr->no_desc_avail++; >- error = ENOBUFS; >- goto xmit_fail; >+ bus_dmamap_unload(txr->txtag, map); >+ return (ENOBUFS); > } > m_head = *m_headp; > >@@ -1789,9 +1803,8 @@ > ** a packet. 
> */ > if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { >- if (ixgbe_tso_setup(txr, m_head, &paylen)) { >+ if (ixgbe_tso_setup(txr, m_head, &paylen, &olinfo_status)) { > cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; >- olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; > olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; > olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; > ++adapter->tso_tx; >@@ -1870,10 +1883,6 @@ > > return (0); > >-xmit_fail: >- bus_dmamap_unload(txr->txtag, txbuf->map); >- return (error); >- > } > > static void >@@ -1992,13 +2001,11 @@ > { > struct adapter *adapter = arg; > device_t dev = adapter->dev; >- struct ifnet *ifp = adapter->ifp; > struct ix_queue *que = adapter->queues; > struct tx_ring *txr = adapter->tx_rings; >- int hung, busy, paused; >+ int hung = 0, paused = 0; > > mtx_assert(&adapter->core_mtx, MA_OWNED); >- hung = busy = paused = 0; > > /* Check for pluggable optics */ > if (adapter->sfp_probe) >@@ -2017,27 +2024,18 @@ > > /* > ** Check the TX queues status >- ** - central locked handling of OACTIVE > ** - watchdog only if all queues show hung > */ > for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { > if ((txr->queue_status & IXGBE_QUEUE_HUNG) && > (paused == 0)) > ++hung; >- if (txr->queue_status & IXGBE_QUEUE_DEPLETED) >- ++busy; > if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0) > taskqueue_enqueue(que->tq, &que->que_task); > } > /* Only truely watchdog if all queues show hung */ > if (hung == adapter->num_queues) > goto watchdog; >- /* Only turn off the stack flow when ALL are depleted */ >- if (busy == adapter->num_queues) >- ifp->if_drv_flags |= IFF_DRV_OACTIVE; >- else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && >- (busy < adapter->num_queues)) >- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; > > out: > ixgbe_rearm_queues(adapter, adapter->que_mask); >@@ -2066,7 +2064,6 @@ > ixgbe_update_link_status(struct adapter *adapter) > { > struct ifnet *ifp = adapter->ifp; >- struct tx_ring *txr = adapter->tx_rings; > device_t dev = 
adapter->dev; > > >@@ -2087,9 +2084,6 @@ > device_printf(dev,"Link is Down\n"); > if_link_state_change(ifp, LINK_STATE_DOWN); > adapter->link_active = FALSE; >- for (int i = 0; i < adapter->num_queues; >- i++, txr++) >- txr->queue_status = IXGBE_QUEUE_IDLE; > } > } > >@@ -2192,6 +2186,11 @@ > return; > } > >+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { >+ adapter->optics = IFM_1000_SX; >+ return; >+ } >+ > if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR | > IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) { > adapter->optics = IFM_10G_LR; >@@ -2229,6 +2228,9 @@ > { > device_t dev = adapter->dev; > struct ix_queue *que = adapter->queues; >+#if __FreeBSD_version >= 800000 >+ struct tx_ring *txr = adapter->tx_rings; >+#endif > int error, rid = 0; > > /* MSI RID at 1 */ >@@ -2248,6 +2250,9 @@ > * Try allocating a fast interrupt and the associated deferred > * processing contexts. > */ >+#if __FreeBSD_version >= 800000 >+ TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); >+#endif > TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); > que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, > taskqueue_thread_enqueue, &que->tq); >@@ -2294,9 +2299,10 @@ > { > device_t dev = adapter->dev; > struct ix_queue *que = adapter->queues; >+ struct tx_ring *txr = adapter->tx_rings; > int error, rid, vector = 0; > >- for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { >+ for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { > rid = vector + 1; > que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, > RF_SHAREABLE | RF_ACTIVE); >@@ -2326,6 +2332,9 @@ > if (adapter->num_queues > 1) > bus_bind_intr(dev, que->res, i); > >+#if __FreeBSD_version >= 800000 >+ TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); >+#endif > TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); > que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, > taskqueue_thread_enqueue, &que->tq); >@@ -2569,12 +2578,13 @@ > ifp->if_softc = adapter; > ifp->if_flags = IFF_BROADCAST 
| IFF_SIMPLEX | IFF_MULTICAST; > ifp->if_ioctl = ixgbe_ioctl; >- ifp->if_start = ixgbe_start; > #if __FreeBSD_version >= 800000 > ifp->if_transmit = ixgbe_mq_start; > ifp->if_qflush = ixgbe_qflush; >+#else >+ ifp->if_start = ixgbe_start; >+ IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); > #endif >- ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; > > ether_ifattach(ifp, adapter->hw.mac.addr); > >@@ -2586,7 +2596,7 @@ > */ > ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); > >- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; >+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM; > ifp->if_capabilities |= IFCAP_JUMBO_MTU; > ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING > | IFCAP_VLAN_HWTSO >@@ -3258,6 +3268,7 @@ > case ETHERTYPE_IPV6: > ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); > ip_hlen = sizeof(struct ip6_hdr); >+ /* XXX-BZ this will go badly in case of ext hdrs. */ > ipproto = ip6->ip6_nxt; > type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; > break; >@@ -3316,17 +3327,23 @@ > * > **********************************************************************/ > static bool >-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) >+ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen, >+ u32 *olinfo_status) > { > struct adapter *adapter = txr->adapter; > struct ixgbe_adv_tx_context_desc *TXD; > struct ixgbe_tx_buf *tx_buffer; > u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; >- u32 mss_l4len_idx = 0; >- u16 vtag = 0; >- int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen; >+ u32 mss_l4len_idx = 0, len; >+ u16 vtag = 0, eh_type; >+ int ctxd, ehdrlen, ip_hlen, tcp_hlen; > struct ether_vlan_header *eh; >+#if ((__FreeBSD_version >= 900505) && defined(INET6)) >+ struct ip6_hdr *ip6; >+#endif >+#ifdef INET > struct ip *ip; >+#endif > struct tcphdr *th; > > >@@ -3335,32 +3352,62 @@ > * Jump over vlan headers if already present > */ > eh = mtod(mp, struct ether_vlan_header *); >- if (eh->evl_encap_proto 
== htons(ETHERTYPE_VLAN)) >+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { > ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; >- else >+ eh_type = eh->evl_proto; >+ } else { > ehdrlen = ETHER_HDR_LEN; >+ eh_type = eh->evl_encap_proto; >+ } > > /* Ensure we have at least the IP+TCP header in the first mbuf. */ >- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr)) >- return FALSE; >+ len = ehdrlen + sizeof(struct tcphdr); >+ switch (ntohs(eh_type)) { >+#if ((__FreeBSD_version >= 900505) && defined(INET6)) >+ case ETHERTYPE_IPV6: >+ if (mp->m_len < len + sizeof(struct ip6_hdr)) >+ return FALSE; >+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); >+ /* XXX-BZ For now we do not pretend to support ext. hdrs. */ >+ if (ip6->ip6_nxt != IPPROTO_TCP) >+ return FALSE; >+ ip_hlen = sizeof(struct ip6_hdr); >+ th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); >+ th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); >+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; >+ break; >+#endif >+#ifdef INET >+ case ETHERTYPE_IP: >+ if (mp->m_len < len + sizeof(struct ip)) >+ return FALSE; >+ ip = (struct ip *)(mp->m_data + ehdrlen); >+ if (ip->ip_p != IPPROTO_TCP) >+ return FALSE; >+ ip->ip_sum = 0; >+ ip_hlen = ip->ip_hl << 2; >+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen); >+ th->th_sum = in_pseudo(ip->ip_src.s_addr, >+ ip->ip_dst.s_addr, htons(IPPROTO_TCP)); >+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; >+ /* Tell transmit desc to also do IPv4 checksum. 
*/ >+ *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; >+ break; >+#endif >+ default: >+ panic("%s: CSUM_TSO but no supported IP version (0x%04x)", >+ __func__, ntohs(eh_type)); >+ break; >+ } > > ctxd = txr->next_avail_desc; > tx_buffer = &txr->tx_buffers[ctxd]; > TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; > >- ip = (struct ip *)(mp->m_data + ehdrlen); >- if (ip->ip_p != IPPROTO_TCP) >- return FALSE; /* 0 */ >- ip->ip_sum = 0; >- ip_hlen = ip->ip_hl << 2; >- th = (struct tcphdr *)((caddr_t)ip + ip_hlen); >- th->th_sum = in_pseudo(ip->ip_src.s_addr, >- ip->ip_dst.s_addr, htons(IPPROTO_TCP)); > tcp_hlen = th->th_off << 2; >- hdrlen = ehdrlen + ip_hlen + tcp_hlen; > > /* This is used in the transmit desc in encap */ >- *paylen = mp->m_pkthdr.len - hdrlen; >+ *paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; > > /* VLAN MACLEN IPLEN */ > if (mp->m_flags & M_VLANTAG) { >@@ -3375,10 +3422,8 @@ > /* ADV DTYPE TUCMD */ > type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; > type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; >- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; > TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); > >- > /* MSS L4LEN IDX */ > mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); > mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); >@@ -3620,10 +3665,6 @@ > if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)) > txr->queue_status = IXGBE_QUEUE_HUNG; > >- /* With a minimum free clear the depleted state bit. 
*/ >- if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) >- txr->queue_status &= ~IXGBE_QUEUE_DEPLETED; >- > if (txr->tx_avail == adapter->num_tx_desc) { > txr->queue_status = IXGBE_QUEUE_IDLE; > return (FALSE); >@@ -3698,21 +3739,30 @@ > mp = rxbuf->m_pack; > > mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; >- /* Get the memory mapping */ >- error = bus_dmamap_load_mbuf_sg(rxr->ptag, >- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); >- if (error != 0) { >- printf("Refresh mbufs: payload dmamap load" >- " failure - %d\n", error); >- m_free(mp); >- rxbuf->m_pack = NULL; >- goto update; >+ >+ /* If we're dealing with an mbuf that was copied rather >+ * than replaced, there's no need to go through busdma. >+ */ >+ if ((rxbuf->flags & IXGBE_RX_COPY) == 0) { >+ /* Get the memory mapping */ >+ error = bus_dmamap_load_mbuf_sg(rxr->ptag, >+ rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); >+ if (error != 0) { >+ printf("Refresh mbufs: payload dmamap load" >+ " failure - %d\n", error); >+ m_free(mp); >+ rxbuf->m_pack = NULL; >+ goto update; >+ } >+ rxbuf->m_pack = mp; >+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap, >+ BUS_DMASYNC_PREREAD); >+ rxbuf->paddr = rxr->rx_base[i].read.pkt_addr = >+ htole64(pseg[0].ds_addr); >+ } else { >+ rxr->rx_base[i].read.pkt_addr = rxbuf->paddr; >+ rxbuf->flags &= ~IXGBE_RX_COPY; > } >- rxbuf->m_pack = mp; >- bus_dmamap_sync(rxr->ptag, rxbuf->pmap, >- BUS_DMASYNC_PREREAD); >- rxr->rx_base[i].read.pkt_addr = >- htole64(pseg[0].ds_addr); > > refreshed = TRUE; > /* Next is precalculated */ >@@ -4025,6 +4075,7 @@ > rxr->next_to_refresh = 0; > rxr->lro_enabled = FALSE; > rxr->rx_split_packets = 0; >+ rxr->rx_copies = 0; > rxr->rx_bytes = 0; > rxr->discard = FALSE; > rxr->vtag_strip = FALSE; >@@ -4319,15 +4370,17 @@ > { > > /* >- * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet >+ * ATM LRO is only for IP/TCP packets and TCP checksum of the packet > * should be computed by hardware. 
Also it should not have VLAN tag in >- * ethernet header. >+ * ethernet header. In case of IPv6 we do not yet support ext. hdrs. > */ > if (rxr->lro_enabled && > (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && > (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && >- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == >- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) && >+ ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == >+ (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) || >+ (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) == >+ (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) && > (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == > (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { > /* >@@ -4580,14 +4633,36 @@ > ** that determines what we are > */ > sendmp = rbuf->fmp; >- rbuf->m_pack = rbuf->fmp = NULL; > > if (sendmp != NULL) { /* secondary frag */ >+ rbuf->m_pack = rbuf->fmp = NULL; > mp->m_flags &= ~M_PKTHDR; > sendmp->m_pkthdr.len += mp->m_len; > } else { >+ /* >+ * Optimize. This might be a small packet, >+ * maybe just a TCP ACK. Do a fast copy that >+ * is cache aligned into a new mbuf, and >+ * leave the old mbuf+cluster for re-use. 
>+ */ >+ if (eop && plen <= IXGBE_RX_COPY_LEN) { >+ sendmp = m_gethdr(M_DONTWAIT, MT_DATA); >+ if (sendmp != NULL) { >+ sendmp->m_data += >+ IXGBE_RX_COPY_ALIGN; >+ ixgbe_bcopy(mp->m_data, >+ sendmp->m_data, plen); >+ sendmp->m_len = plen; >+ rxr->rx_copies++; >+ rbuf->flags |= IXGBE_RX_COPY; >+ } >+ } >+ if (sendmp == NULL) { >+ rbuf->m_pack = rbuf->fmp = NULL; >+ sendmp = mp; >+ } >+ > /* first desc of a non-ps chain */ >- sendmp = mp; > sendmp->m_flags |= M_PKTHDR; > sendmp->m_pkthdr.len = mp->m_len; > if (staterr & IXGBE_RXD_STAT_VP) { >@@ -5438,6 +5513,9 @@ > SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", > CTLFLAG_RD, &rxr->rx_bytes, > "Queue Bytes Received"); >+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", >+ CTLFLAG_RD, &rxr->rx_copies, >+ "Copied RX Frames"); > SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", > CTLFLAG_RD, &lro->lro_queued, 0, > "LRO Queued"); >Index: ixgbe.h >=================================================================== >--- ixgbe.h (revision 247019) >+++ ixgbe.h (working copy) >@@ -154,6 +154,19 @@ > #define IXGBE_FC_HI 0x20000 > #define IXGBE_FC_LO 0x10000 > >+/* >+ * Used for optimizing small rx mbufs. Effort is made to keep the copy >+ * small and aligned for the CPU L1 cache. >+ * >+ * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting >+ * 32 byte alignment needed for the fast bcopy results in 8 bytes being >+ * wasted. Getting 64 byte alignment, which _should_ be ideal for >+ * modern Intel CPUs, results in 40 bytes wasted and a significant drop >+ * in observed efficiency of the optimization, 97.9% -> 81.8%. >+ */ >+#define IXGBE_RX_COPY_LEN 160 >+#define IXGBE_RX_COPY_ALIGN (MHLEN - IXGBE_RX_COPY_LEN) >+ > /* Keep older OS drivers building... 
*/ > #if !defined(SYSCTL_ADD_UQUAD) > #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD >@@ -245,6 +258,9 @@ > struct mbuf *fmp; > bus_dmamap_t hmap; > bus_dmamap_t pmap; >+ u_int flags; >+#define IXGBE_RX_COPY 0x01 >+ uint64_t paddr; > }; > > /* >@@ -298,6 +314,7 @@ > char mtx_name[16]; > #if __FreeBSD_version >= 800000 > struct buf_ring *br; >+ struct task txq_task; > #endif > #ifdef IXGBE_FDIR > u16 atr_sample; >@@ -339,6 +356,7 @@ > /* Soft stats */ > u64 rx_irq; > u64 rx_split_packets; >+ u64 rx_copies; > u64 rx_packets; > u64 rx_bytes; > u64 rx_discarded; >Index: ixgbe_type.h >=================================================================== >--- ixgbe_type.h (revision 247019) >+++ ixgbe_type.h (working copy) >@@ -38,9 +38,6 @@ > #include "ixgbe_osdep.h" > > >-/* Vendor ID */ >-#define IXGBE_INTEL_VENDOR_ID 0x8086 >- > /* Device IDs */ > #define IXGBE_DEV_ID_82598 0x10B6 > #define IXGBE_DEV_ID_82598_BX 0x1508 >@@ -62,11 +59,14 @@ > #define IXGBE_DEV_ID_82599_CX4 0x10F9 > #define IXGBE_DEV_ID_82599_SFP 0x10FB > #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 >+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 > #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 >+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 > #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A > #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 > #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 > #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D >+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A > #define IXGBE_DEV_ID_82599EN_SFP 0x1557 > #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC > #define IXGBE_DEV_ID_82599_T3_LOM 0x151C >@@ -1014,6 +1014,7 @@ > #define IXGBE_RSCCTL_MAXDESC_4 0x04 > #define IXGBE_RSCCTL_MAXDESC_8 0x08 > #define IXGBE_RSCCTL_MAXDESC_16 0x0C >+#define IXGBE_RSCCTL_TS_DIS 0x02 > > /* RSCDBU Bit Masks */ > #define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F >@@ -1026,7 +1027,7 @@ > #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ > #define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ > #define 
IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ >-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disabl RSC compl on LLI */ >+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ > #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ > #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ > >@@ -1590,6 +1591,7 @@ > #define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ > #define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ > #define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ >+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ > #define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ > #define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ > #define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ >@@ -3142,6 +3144,7 @@ > u16 subsystem_vendor_id; > u8 revision_id; > bool adapter_stopped; >+ int api_version; > bool force_full_reset; > bool allow_unsupported_sfp; > }; >Index: ixgbe_mbx.h >=================================================================== >--- ixgbe_mbx.h (revision 247019) >+++ ixgbe_mbx.h (working copy) >@@ -84,9 +84,21 @@ > #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ > #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ > #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ >+ >+/* mailbox API, version 1.0 VF requests */ > #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ > #define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ >+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ > >+/* mailbox API, version 1.1 VF requests */ >+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ >+ >+/* GET_QUEUES return data indices within the mailbox */ >+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ >+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ >+#define IXGBE_VF_TRANS_VLAN 3 
/* Indication of port vlan */ >+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ >+ > /* length of permanent address message returned from PF */ > #define IXGBE_VF_PERMADDR_MSG_LEN 4 > /* word in permanent address message with the current multicast type */ >Index: ixgbe_x540.c >=================================================================== >--- ixgbe_x540.c (revision 247019) >+++ ixgbe_x540.c (working copy) >@@ -116,6 +116,7 @@ > mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic; > mac->ops.check_link = &ixgbe_check_mac_link_generic; > >+ > mac->mcft_size = 128; > mac->vft_size = 128; > mac->num_rar_entries = 128; >@@ -973,3 +974,4 @@ > return IXGBE_SUCCESS; > } > >+ >Index: ixgbe_vf.c >=================================================================== >--- ixgbe_vf.c (revision 247019) >+++ ixgbe_vf.c (working copy) >@@ -142,6 +142,7 @@ > /* Call adapter stop to disable tx/rx and clear interrupts */ > hw->mac.ops.stop_adapter(hw); > >+ > DEBUGOUT("Issuing a function level reset to MAC\n"); > > ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST; >@@ -272,6 +273,17 @@ > return vector; > } > >+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, >+ u32 *msg, u16 size) >+{ >+ struct ixgbe_mbx_info *mbx = &hw->mbx; >+ u32 retmsg[IXGBE_VFMAILBOX_SIZE]; >+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0); >+ >+ if (!retval) >+ mbx->ops.read_posted(hw, retmsg, size, 0); >+} >+ > /** > * ixgbe_set_rar_vf - set device MAC address > * @hw: pointer to hardware structure >@@ -514,3 +526,58 @@ > return IXGBE_SUCCESS; > } > >+/** >+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length >+ * @hw: pointer to the HW structure >+ * @max_size: value to assign to max frame size >+ **/ >+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) >+{ >+ u32 msgbuf[2]; >+ >+ msgbuf[0] = IXGBE_VF_SET_LPE; >+ msgbuf[1] = max_size; >+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2); >+} >+ >+/** >+ * ixgbevf_negotiate_api_version - Negotiate supported API 
version >+ * @hw: pointer to the HW structure >+ * @api: integer containing requested API version >+ **/ >+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) >+{ >+ int err; >+ u32 msg[3]; >+ >+ /* Negotiate the mailbox API version */ >+ msg[0] = IXGBE_VF_API_NEGOTIATE; >+ msg[1] = api; >+ msg[2] = 0; >+ err = hw->mbx.ops.write_posted(hw, msg, 3, 0); >+ >+ if (!err) >+ err = hw->mbx.ops.read_posted(hw, msg, 3, 0); >+ >+ if (!err) { >+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; >+ >+ /* Store value and return 0 on success */ >+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { >+ hw->api_version = api; >+ return 0; >+ } >+ >+ err = IXGBE_ERR_INVALID_ARGUMENT; >+ } >+ >+ return err; >+} >+ >+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, >+ unsigned int *default_tc) >+{ >+ UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc); >+ return IXGBE_SUCCESS; >+} >+ >Index: ixgbe_osdep.h >=================================================================== >--- ixgbe_osdep.h (revision 247019) >+++ ixgbe_osdep.h (working copy) >@@ -69,12 +69,17 @@ > #define DEBUGOUT1(S,A) printf(S "\n",A) > #define DEBUGOUT2(S,A,B) printf(S "\n",A,B) > #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C) >+ #define DEBUGOUT4(S,A,B,C,D) printf(S "\n",A,B,C,D) >+ #define DEBUGOUT5(S,A,B,C,D,E) printf(S "\n",A,B,C,D,E) >+ #define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F) > #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G) > #else > #define DEBUGOUT(S) > #define DEBUGOUT1(S,A) > #define DEBUGOUT2(S,A,B) > #define DEBUGOUT3(S,A,B,C) >+ #define DEBUGOUT4(S,A,B,C,D) >+ #define DEBUGOUT5(S,A,B,C,D,E) > #define DEBUGOUT6(S,A,B,C,D,E,F) > #define DEBUGOUT7(S,A,B,C,D,E,F,G) > #endif >@@ -86,6 +91,9 @@ > #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ > #define PCI_COMMAND_REGISTER PCIR_COMMAND > >+/* Shared code dropped this define.. 
*/ >+#define IXGBE_INTEL_VENDOR_ID 0x8086 >+ > /* Bunch of defines for shared code bogosity */ > #define UNREFERENCED_PARAMETER(_p) > #define UNREFERENCED_1PARAMETER(_p) >@@ -143,6 +151,25 @@ > #define prefetch(x) > #endif > >+/* >+ * Optimized bcopy thanks to Luigi Rizzo's investigative work. Assumes >+ * non-overlapping regions and 32-byte padding on both src and dst. >+ */ >+static __inline int >+ixgbe_bcopy(void *_src, void *_dst, int l) >+{ >+ uint64_t *src = _src; >+ uint64_t *dst = _dst; >+ >+ for (; l > 0; l -= 32) { >+ *dst++ = *src++; >+ *dst++ = *src++; >+ *dst++ = *src++; >+ *dst++ = *src++; >+ } >+ return (0); >+} >+ > struct ixgbe_osdep > { > bus_space_tag_t mem_bus_space_tag;
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 176281
: 132077