
Intel listed a total of 28 commits that need to be backported. Nine of them are already included in our code base, and the commit "ice: Add support for E825-C TS PLL handling" will not be backported since we are not dealing with the E825 for 24.09. That leaves 18 commits to backport, all introduced in linux-6.9.y and linux-6.10.y. To backport these 18 commits cleanly, we backported 37 upstream commits in total:

1) Patches 1-15 are cherry-picked to resolve the conflicts for patch 16 ("ice: introduce PTP state machine") and patch 36 ("ice: Introduce ice_ptp_hw struct"), and they also ease the backporting of the subsequent commits.
2) Patches 24-27 are cherry-picked to resolve the conflicts for patch 28 ("ice: Fix debugfs with devlink reload").
3) Minor adjustments were made to patches 17, 21, 23 and 33 to fit the changed context.

Verification:
- Installs from ISO succeed on servers with ice (Intel Ethernet Controller E810-XXVDA4T Westport Channel) and i40e (Intel Ethernet Controller X710) hardware, for rt and std.
- Interfaces come up and pass packets for rt and std.
- Create VFs, ensure that they are picked up by the new iavf driver and that the interfaces can come up and pass packets on rt and std systems.
- Check dmesg to confirm the DDP package is loaded successfully and that its version is 1.3.36.0 for rt and std.

Story: 2011056
Task: 50950

Change-Id: I9aef0378ea01451684341093a167eaead3edc458
Signed-off-by: Jiping Ma <jiping.ma2@windriver.com>
From 619e0e61b39cf051137613459d36c4fe8f435e57 Mon Sep 17 00:00:00 2001
From: Jacob Keller <jacob.e.keller@intel.com>
Date: Mon, 26 Feb 2024 16:14:55 -0800
Subject: [PATCH 31/36] ice: use GENMASK instead of BIT(n) - 1 in pack
 functions

The functions used to pack the Tx and Rx context into the hardware format
rely on using BIT() and then subtracting 1 to get a bitmask. These
functions even have a comment about how x86 machines can't use this method
for certain widths because the SHL instructions will not work properly.

The Linux kernel already provides the GENMASK macro for generating a
suitable bitmask. Further, GENMASK is capable of generating the mask
including the shift_width. Since width is the total field width, take care
to subtract one to get the final bit position.

Since we now include the shifted bits as part of the mask, shift the source
value first before applying the mask.
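[Editor's note, not part of the upstream patch: a minimal C sketch of the masking change described above. GENMASK_LOCAL stands in for the kernel's GENMASK() (assuming a 32-bit unsigned int), and pack_byte_old()/pack_byte_new() are hypothetical helpers modelled on ice_pack_ctx_byte() to contrast the old mask-then-shift order with the new shift-then-mask order.]

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(); assumes 32-bit unsigned int. */
#define GENMASK_LOCAL(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Old approach: mask with BIT(width) - 1, then shift mask and value. */
static uint8_t pack_byte_old(uint8_t src, int width, int lsb)
{
	int shift_width = lsb % 8;
	uint8_t mask = (uint8_t)((1u << width) - 1);

	src &= mask;
	mask <<= shift_width;
	src <<= shift_width;
	return src & mask;
}

/* New approach: build the mask already shifted, shift first, then mask. */
static uint8_t pack_byte_new(uint8_t src, int width, int lsb)
{
	int shift_width = lsb % 8;
	uint8_t mask = (uint8_t)GENMASK_LOCAL(width - 1 + shift_width, shift_width);

	src <<= shift_width;
	return src & mask;
}

int main(void)
{
	/* Example field: 3 bits wide, starting at bit 2 of the byte.
	 * Stray high bits in the source are discarded either way.
	 */
	uint8_t v = 0x1f;

	printf("old=0x%02x new=0x%02x\n",
	       pack_byte_old(v, 3, 2), pack_byte_new(v, 3, 2));
	return 0;
}
```

Both helpers print 0x1c for this example, which is the point of the refactor: because the mask now spans bits (width - 1 + shift_width) down to shift_width, the shifted source can be masked in one step and the old special cases for full-width fields disappear.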
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
(cherry picked from commit a45d1bf516c097bb7ae4983d3128ebf139be952c)
Signed-off-by: Jiping Ma <jiping.ma2@windriver.com>
---
 drivers/net/ethernet/intel/ice/ice_common.c | 44 ++++-----------------
 1 file changed, 8 insertions(+), 36 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 17f60a98c8ed..55a2e264dd69 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4284,14 +4284,11 @@ static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = (u8)(BIT(ce_info->width) - 1);
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	src_byte = *from;
-	src_byte &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_byte <<= shift_width;
+	src_byte &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4324,17 +4321,14 @@ static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = BIT(ce_info->width) - 1;
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_word = *(u16 *)from;
-	src_word &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_word <<= shift_width;
+	src_word &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4367,25 +4361,14 @@ static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 32 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 5 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 32)
-		mask = BIT(ce_info->width) - 1;
-	else
-		mask = (u32)~0;
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_dword = *(u32 *)from;
-	src_dword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_dword <<= shift_width;
+	src_dword &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4418,25 +4401,14 @@ static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 64 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 6 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 64)
-		mask = BIT_ULL(ce_info->width) - 1;
-	else
-		mask = (u64)~0;
+	mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_qword = *(u64 *)from;
-	src_qword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_qword <<= shift_width;
+	src_qword &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
-- 
2.43.0