vxlan: reuse inner packet flow hash for tunnel outer header load balance

Type: fix

Several tunnel encapsulations (e.g. gtpu, geneve, vxlan, vxlan-gbp) use
UDP as the outer header and set the UDP src port from the inner header's
flow hash. Since the flow hash of the inner header has already been
calculated, saving it to vnet_buffer(b)->ip.flow_hash spares the
load-balance node the work of recomputing it when selecting ECMP uplinks.

Change-Id: I0e4e2b27178f4fcc5785e221d6d1f3e8747d0d59
Signed-off-by: Shawn Ji <xiaji@tethrnet.com>
(cherry picked from commit 623b4f85e6ee4611ae15bb3103fe30725ca977ed)
This commit is contained in:
Shawn Ji
2019-12-18 10:10:54 +08:00
parent 2eb13c2954
commit d0bb569621
4 changed files with 31 additions and 0 deletions

View File

@ -413,6 +413,12 @@ gtpu_encap_inline (vlib_main_t * vm,
stats_n_packets += 4;
stats_n_bytes += len0 + len1 + len2 + len3;
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b0)->ip.flow_hash = flow_hash0;
vnet_buffer (b1)->ip.flow_hash = flow_hash1;
vnet_buffer (b2)->ip.flow_hash = flow_hash2;
vnet_buffer (b3)->ip.flow_hash = flow_hash3;
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
@ -611,6 +617,9 @@ gtpu_encap_inline (vlib_main_t * vm,
stats_n_packets += 1;
stats_n_bytes += len0;
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b0)->ip.flow_hash = flow_hash0;
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile

View File

@ -120,6 +120,7 @@ geneve_encap_inline (vlib_main_t * vm,
flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
/* Get next node index and adj index from tunnel next_dpo */
if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
{
@ -289,6 +290,10 @@ geneve_encap_inline (vlib_main_t * vm,
stats_n_packets += 2;
stats_n_bytes += len0 + len1;
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
@ -467,6 +472,9 @@ geneve_encap_inline (vlib_main_t * vm,
stats_n_packets += 1;
stats_n_bytes += len0;
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile

View File

@ -309,6 +309,10 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
udp1->checksum = 0xffff;
}
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
vlib_increment_combined_counter (tx_counter, thread_index,
@ -451,6 +455,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
udp0->checksum = 0xffff;
}
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated++;

View File

@ -287,6 +287,10 @@ vxlan_encap_inline (vlib_main_t * vm,
udp1->checksum = 0xffff;
}
/* save inner packet flow_hash for load-balance node */
vnet_buffer (b0)->ip.flow_hash = flow_hash0;
vnet_buffer (b1)->ip.flow_hash = flow_hash1;
if (sw_if_index0 == sw_if_index1)
{
vlib_increment_combined_counter (tx_counter, thread_index,
@ -424,6 +428,9 @@ vxlan_encap_inline (vlib_main_t * vm,
udp0->checksum = 0xffff;
}
/* reuse inner packet flow_hash for load-balance node */
vnet_buffer (b0)->ip.flow_hash = flow_hash0;
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated ++;