2018-06-24 22:49:55 +02:00
|
|
|
import unittest
|
2018-09-26 11:19:00 +02:00
|
|
|
import socket
|
2019-04-16 02:41:34 +00:00
|
|
|
import struct
|
2024-07-23 01:28:19 -04:00
|
|
|
import re
|
|
|
|
import os
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2019-02-25 14:32:02 +00:00
|
|
|
from scapy.layers.inet import IP, ICMP, TCP, UDP
|
2019-04-24 23:39:16 +02:00
|
|
|
from scapy.layers.ipsec import SecurityAssociation, ESP
|
2019-12-13 23:39:35 +00:00
|
|
|
from scapy.layers.l2 import Ether
|
2024-07-23 01:28:19 -04:00
|
|
|
from scapy.packet import raw, Raw, Padding
|
2022-04-26 19:02:15 +02:00
|
|
|
from scapy.layers.inet6 import (
|
|
|
|
IPv6,
|
|
|
|
ICMPv6EchoRequest,
|
|
|
|
IPv6ExtHdrHopByHop,
|
|
|
|
IPv6ExtHdrFragment,
|
|
|
|
IPv6ExtHdrDestOpt,
|
|
|
|
)
|
2019-12-20 00:54:57 +00:00
|
|
|
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2023-08-31 00:47:44 -04:00
|
|
|
from framework import VppTestCase
|
|
|
|
from asfframework import VppTestRunner
|
2019-07-29 14:49:52 +00:00
|
|
|
from util import ppp, reassemble4, fragment_rfc791, fragment_rfc8200
|
2019-01-09 21:22:20 -08:00
|
|
|
from vpp_papi import VppEnum
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
from vpp_ipsec import VppIpsecSpd, VppIpsecSpdEntry, VppIpsecSpdItfBinding
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
from ipaddress import ip_address
|
2024-03-11 10:38:46 +00:00
|
|
|
from config import config
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2020-12-04 14:57:51 -05:00
|
|
|
class IPsecIPv4Params:
    """Parameter bundle for IPv4 IPsec test SAs (tunnel and transport)."""

    # address-family constants shared by all instances
    addr_type = socket.AF_INET
    addr_any = "0.0.0.0"
    addr_bcast = "255.255.255.255"
    addr_len = 32
    is_ipv6 = 0

    def __init__(self):
        # remote host reachable through the tunnel (v4 primary, v6 alternate)
        self.remote_tun_if_host = "1.1.1.1"
        self.remote_tun_if_host6 = "1111::1"

        # tunnel-mode SA ids/SPIs (scapy side and VPP side)
        self.scapy_tun_sa_id = 100
        self.scapy_tun_spi = 1000
        self.vpp_tun_sa_id = 200
        self.vpp_tun_spi = 2000

        # transport-mode SA ids/SPIs
        self.scapy_tra_sa_id = 300
        self.scapy_tra_spi = 3000
        self.vpp_tra_sa_id = 400
        self.vpp_tra_spi = 4000

        # IPv6 header fields used when building inner/outer headers
        self.outer_hop_limit = 64
        self.inner_hop_limit = 255
        self.outer_flow_label = 0
        self.inner_flow_label = 0x12345

        self.anti_replay_window_size = 64

        # integrity algorithm: VPP enum id plus the matching scapy name/key
        self.auth_algo_vpp_id = (
            VppEnum.vl_api_ipsec_integ_alg_t.IPSEC_API_INTEG_ALG_SHA1_96
        )
        self.auth_algo = "HMAC-SHA1-96"  # scapy name
        self.auth_key = b"C91KUR9GYMm5GfkEvNjX"

        # cipher: VPP enum id plus the matching scapy name/key
        self.crypt_algo_vpp_id = (
            VppEnum.vl_api_ipsec_crypto_alg_t.IPSEC_API_CRYPTO_ALG_AES_CBC_128
        )
        self.crypt_algo = "AES-CBC"  # scapy name
        self.crypt_key = b"JPjyOWBeVEQiMe7h"
        self.salt = 0
        self.flags = 0
        self.nat_header = None
        self.tun_flags = (
            VppEnum.vl_api_tunnel_encap_decap_flags_t.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
        )
        self.dscp = 0
        self.async_mode = False
|
2018-09-26 11:19:00 +02:00
|
|
|
|
|
|
|
|
2020-12-04 14:57:51 -05:00
|
|
|
class IPsecIPv6Params:
    """Parameter bundle for IPv6 IPsec test SAs (tunnel and transport)."""

    # address-family constants shared by all instances
    addr_type = socket.AF_INET6
    addr_any = "0::0"
    addr_bcast = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"
    addr_len = 128
    is_ipv6 = 1

    def __init__(self):
        # remote host reachable through the tunnel (v6 primary, v4 alternate)
        self.remote_tun_if_host = "1111:1111:1111:1111:1111:1111:1111:1111"
        self.remote_tun_if_host4 = "1.1.1.1"

        # tunnel-mode SA ids/SPIs (scapy side and VPP side)
        self.scapy_tun_sa_id = 500
        self.scapy_tun_spi = 3001
        self.vpp_tun_sa_id = 600
        self.vpp_tun_spi = 3000

        # transport-mode SA ids/SPIs
        self.scapy_tra_sa_id = 700
        self.scapy_tra_spi = 4001
        self.vpp_tra_sa_id = 800
        self.vpp_tra_spi = 4000

        # IPv6 header fields used when building inner/outer headers
        self.outer_hop_limit = 64
        self.inner_hop_limit = 255
        self.outer_flow_label = 0
        self.inner_flow_label = 0x12345

        self.anti_replay_window_size = 64

        # integrity algorithm: VPP enum id plus the matching scapy name/key
        self.auth_algo_vpp_id = (
            VppEnum.vl_api_ipsec_integ_alg_t.IPSEC_API_INTEG_ALG_SHA1_96
        )
        self.auth_algo = "HMAC-SHA1-96"  # scapy name
        self.auth_key = b"C91KUR9GYMm5GfkEvNjX"

        # cipher: VPP enum id plus the matching scapy name/key
        self.crypt_algo_vpp_id = (
            VppEnum.vl_api_ipsec_crypto_alg_t.IPSEC_API_CRYPTO_ALG_AES_CBC_128
        )
        self.crypt_algo = "AES-CBC"  # scapy name
        self.crypt_key = b"JPjyOWBeVEQiMe7h"
        self.salt = 0
        self.flags = 0
        self.nat_header = None
        self.tun_flags = (
            VppEnum.vl_api_tunnel_encap_decap_flags_t.TUNNEL_API_ENCAP_DECAP_FLAG_NONE
        )
        self.dscp = 0
        self.async_mode = False
|
2018-09-26 11:19:00 +02:00
|
|
|
|
|
|
|
|
2019-09-26 16:20:19 +00:00
|
|
|
def mk_scapy_crypt_key(p):
    """Return the key material scapy expects for params *p*.

    Counter/GCM-style ciphers carry the 4-byte salt appended to the key;
    every other algorithm uses the key verbatim.
    """
    if p.crypt_algo not in ("AES-GCM", "AES-CTR", "AES-NULL-GMAC"):
        return p.crypt_key
    # salt is transmitted big-endian after the key proper
    return p.crypt_key + struct.pack("!I", p.salt)
|
|
|
|
|
|
|
|
|
2019-03-20 18:24:43 +00:00
|
|
|
def config_tun_params(p, encryption_type, tun_if):
    """Build the pair of scapy tunnel-mode SAs for params *p* over *tun_if*.

    Stores the results on *p* as ``scapy_tun_sa`` (encrypts towards VPP,
    so the outer addresses are reversed) and ``vpp_tun_sa`` (mirrors what
    VPP sends towards the test host).
    """
    ip_class_by_addr_type = {socket.AF_INET: IP, socket.AF_INET6: IPv6}
    esn_en = bool(
        p.flags & (VppEnum.vl_api_ipsec_sad_flags_t.IPSEC_API_SAD_FLAG_USE_ESN)
    )
    p.tun_dst = tun_if.remote_addr[p.addr_type]
    p.tun_src = tun_if.local_addr[p.addr_type]
    crypt_key = mk_scapy_crypt_key(p)

    outer_ip = ip_class_by_addr_type[p.addr_type]
    p.scapy_tun_sa = SecurityAssociation(
        encryption_type,
        spi=p.scapy_tun_spi,
        crypt_algo=p.crypt_algo,
        crypt_key=crypt_key,
        auth_algo=p.auth_algo,
        auth_key=p.auth_key,
        tunnel_header=outer_ip(src=p.tun_dst, dst=p.tun_src),
        nat_t_header=p.nat_header,
        esn_en=esn_en,
    )
    p.vpp_tun_sa = SecurityAssociation(
        encryption_type,
        spi=p.vpp_tun_spi,
        crypt_algo=p.crypt_algo,
        crypt_key=crypt_key,
        auth_algo=p.auth_algo,
        auth_key=p.auth_key,
        tunnel_header=outer_ip(dst=p.tun_dst, src=p.tun_src),
        nat_t_header=p.nat_header,
        esn_en=esn_en,
    )
|
2019-03-20 18:24:43 +00:00
|
|
|
|
|
|
|
|
|
|
|
def config_tra_params(p, encryption_type):
    """Build the pair of scapy transport-mode SAs for params *p*.

    Stores the results on *p* as ``scapy_tra_sa`` and ``vpp_tra_sa``.
    """
    esn_en = bool(
        p.flags & (VppEnum.vl_api_ipsec_sad_flags_t.IPSEC_API_SAD_FLAG_USE_ESN)
    )
    crypt_key = mk_scapy_crypt_key(p)

    p.scapy_tra_sa = SecurityAssociation(
        encryption_type,
        spi=p.scapy_tra_spi,
        crypt_algo=p.crypt_algo,
        crypt_key=crypt_key,
        auth_algo=p.auth_algo,
        auth_key=p.auth_key,
        nat_t_header=p.nat_header,
        esn_en=esn_en,
    )
    p.vpp_tra_sa = SecurityAssociation(
        encryption_type,
        spi=p.vpp_tra_spi,
        crypt_algo=p.crypt_algo,
        crypt_key=crypt_key,
        auth_algo=p.auth_algo,
        auth_key=p.auth_key,
        nat_t_header=p.nat_header,
        esn_en=esn_en,
    )
|
2019-03-20 18:24:43 +00:00
|
|
|
|
|
|
|
|
2018-06-24 22:49:55 +02:00
|
|
|
class TemplateIpsec(VppTestCase):
    """
    TRANSPORT MODE::

         ------   encrypt  ---
        |tra_if| <-------> |VPP|
         ------   decrypt  ---

    TUNNEL MODE::

         ------   encrypt  ---   plain   ---
        |tun_if| <-------  |VPP| <------ |pg1|
         ------            ---           ---

         ------   decrypt  ---   plain   ---
        |tun_if| ------->  |VPP| ------> |pg1|
         ------            ---           ---
    """

    # SPD ids used for tunnel and transport test policies
    tun_spd_id = 1
    tra_spd_id = 2

    def ipsec_select_backend(self):
        """empty method to be overloaded when necessary"""
        pass

    @classmethod
    def setUpClass(cls):
        super(TemplateIpsec, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TemplateIpsec, cls).tearDownClass()

    def setup_params(self):
        # subclasses may pre-seed ipv4_params/ipv6_params before calling us
        if not hasattr(self, "ipv4_params"):
            self.ipv4_params = IPsecIPv4Params()
        if not hasattr(self, "ipv6_params"):
            self.ipv6_params = IPsecIPv6Params()
        # index the per-AF params by address family for easy lookup
        self.params = {
            self.ipv4_params.addr_type: self.ipv4_params,
            self.ipv6_params.addr_type: self.ipv6_params,
        }

    def config_interfaces(self):
        """Bring up three pg interfaces with v4 + v6 addressing."""
        self.create_pg_interfaces(range(3))
        self.interfaces = list(self.pg_interfaces)
        for intf in self.interfaces:
            intf.admin_up()
            intf.config_ip4()
            intf.resolve_arp()
            intf.config_ip6()
            intf.resolve_ndp()

    def setUp(self):
        super(TemplateIpsec, self).setUp()

        self.setup_params()

        self.vpp_esp_protocol = VppEnum.vl_api_ipsec_proto_t.IPSEC_API_PROTO_ESP
        self.vpp_ah_protocol = VppEnum.vl_api_ipsec_proto_t.IPSEC_API_PROTO_AH

        self.config_interfaces()

        self.ipsec_select_backend()

    def unconfig_interfaces(self):
        for intf in self.interfaces:
            intf.admin_down()
            intf.unconfig_ip4()
            intf.unconfig_ip6()

    def tearDown(self):
        super(TemplateIpsec, self).tearDown()

        self.unconfig_interfaces()

    def show_commands_at_teardown(self):
        self.logger.info(self.vapi.cli("show hardware"))

    def gen_encrypt_pkts(self, p, sa, sw_intf, src, dst, count=1, payload_size=54):
        """Return *count* ESP-encrypted v4 ICMP packets for *sw_intf*."""
        eth = Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
        return [
            eth / sa.encrypt(IP(src=src, dst=dst) / ICMP() / Raw(b"X" * payload_size))
            for _ in range(count)
        ]

    def gen_encrypt_pkts6(self, p, sa, sw_intf, src, dst, count=1, payload_size=54):
        """Return *count* ESP-encrypted v6 echo-request packets for *sw_intf*."""
        eth = Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
        return [
            eth
            / sa.encrypt(
                IPv6(src=src, dst=dst, hlim=p.inner_hop_limit, fl=p.inner_flow_label)
                / ICMPv6EchoRequest(id=0, seq=1, data="X" * payload_size)
            )
            for _ in range(count)
        ]

    def gen_pkts(self, sw_intf, src, dst, count=1, payload_size=54):
        """Return *count* plaintext v4 ICMP packets for *sw_intf*."""
        eth = Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
        return [
            eth / IP(src=src, dst=dst) / ICMP() / Raw(b"X" * payload_size)
            for _ in range(count)
        ]

    def gen_pkts6(self, p, sw_intf, src, dst, count=1, payload_size=54):
        """Return *count* plaintext v6 echo-request packets for *sw_intf*."""
        eth = Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
        return [
            eth
            / IPv6(src=src, dst=dst, hlim=p.inner_hop_limit, fl=p.inner_flow_label)
            / ICMPv6EchoRequest(id=0, seq=1, data="X" * payload_size)
            for _ in range(count)
        ]
|
2018-09-26 11:19:00 +02:00
|
|
|
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2019-04-10 12:39:10 +00:00
|
|
|
class IpsecTcp(object):
    """Mixin verifying TCP checksums on VPP-generated, IPsec-protected replies."""

    def verify_tcp_checksum(self):
        # start http cli server listener on http://0.0.0.0:80
        self.vapi.cli("http cli server")
        p = self.params[socket.AF_INET]
        # encrypted SYN towards VPP's local address, dport 80
        inner = IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4) / TCP(
            flags="S", dport=80
        )
        send = Ether(
            src=self.tun_if.remote_mac, dst=self.tun_if.local_mac
        ) / p.scapy_tun_sa.encrypt(inner)
        self.logger.debug(ppp("Sending packet:", send))
        recv = self.send_and_expect(self.tun_if, [send], self.tun_if)[0]
        # decrypt the reply and validate its L3/L4 checksums
        decrypted = p.vpp_tun_sa.decrypt(recv[IP])
        self.assert_packet_checksums_valid(decrypted)
|
|
|
|
|
|
|
|
|
2024-03-11 10:38:46 +00:00
|
|
|
@unittest.skipIf(
    "hs_apps" in config.excluded_plugins, "Exclude tests requiring hs_apps plugin"
)
class IpsecTcpTests(IpsecTcp):
    """Concrete test case wrapping the IpsecTcp checksum check."""

    def test_tcp_checksum(self):
        """verify checksum correctness for vpp generated packets"""
        self.verify_tcp_checksum()
|
|
|
|
|
|
|
|
|
|
|
|
class IpsecTra4(object):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""verify methods for Transport v4"""
|
|
|
|
|
2021-02-25 10:05:32 +00:00
|
|
|
def get_replay_counts(self, p):
|
2022-08-09 03:34:51 +00:00
|
|
|
replay_node_name = "/err/%s/replay" % self.tra4_decrypt_node_name[0]
|
2021-02-25 10:05:32 +00:00
|
|
|
count = self.statistics.get_err_counter(replay_node_name)
|
|
|
|
|
|
|
|
if p.async_mode:
|
2022-04-26 19:02:15 +02:00
|
|
|
replay_post_node_name = (
|
2022-08-09 03:34:51 +00:00
|
|
|
"/err/%s/replay" % self.tra4_decrypt_node_name[p.async_mode]
|
2022-04-26 19:02:15 +02:00
|
|
|
)
|
2021-02-25 10:05:32 +00:00
|
|
|
count += self.statistics.get_err_counter(replay_post_node_name)
|
|
|
|
|
|
|
|
return count
|
|
|
|
|
|
|
|
def get_hash_failed_counts(self, p):
|
2023-03-10 17:33:03 +01:00
|
|
|
if ESP == self.encryption_type and p.crypt_algo in ("AES-GCM", "AES-NULL-GMAC"):
|
2022-04-26 19:02:15 +02:00
|
|
|
hash_failed_node_name = (
|
2022-08-09 03:34:51 +00:00
|
|
|
"/err/%s/decryption_failed" % self.tra4_decrypt_node_name[p.async_mode]
|
2022-04-26 19:02:15 +02:00
|
|
|
)
|
2021-02-25 10:05:32 +00:00
|
|
|
else:
|
2022-04-26 19:02:15 +02:00
|
|
|
hash_failed_node_name = (
|
2022-08-09 03:34:51 +00:00
|
|
|
"/err/%s/integ_error" % self.tra4_decrypt_node_name[p.async_mode]
|
2022-04-26 19:02:15 +02:00
|
|
|
)
|
2021-02-25 10:05:32 +00:00
|
|
|
count = self.statistics.get_err_counter(hash_failed_node_name)
|
|
|
|
|
|
|
|
if p.async_mode:
|
2022-04-26 19:02:15 +02:00
|
|
|
count += self.statistics.get_err_counter("/err/crypto-dispatch/bad-hmac")
|
2021-02-25 10:05:32 +00:00
|
|
|
|
|
|
|
return count
|
|
|
|
|
2021-06-28 13:31:28 +00:00
|
|
|
def verify_hi_seq_num(self):
|
|
|
|
p = self.params[socket.AF_INET]
|
|
|
|
saf = VppEnum.vl_api_ipsec_sad_flags_t
|
|
|
|
esn_on = p.vpp_tra_sa.esn_en
|
|
|
|
ar_on = p.flags & saf.IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY
|
|
|
|
|
2022-08-09 03:34:51 +00:00
|
|
|
seq_cycle_node_name = "/err/%s/seq_cycled" % self.tra4_encrypt_node_name
|
2021-06-28 13:31:28 +00:00
|
|
|
replay_count = self.get_replay_counts(p)
|
|
|
|
hash_failed_count = self.get_hash_failed_counts(p)
|
|
|
|
seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
|
|
|
|
|
|
|
|
# a few packets so we get the rx seq number above the window size and
|
|
|
|
# thus can simulate a wrap with an out of window packet
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(63, 80)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# these 4 packets will all choose seq-num 0 to decrpyt since none
|
|
|
|
# are out of window when first checked. however, once #200 has
|
|
|
|
# decrypted it will move the window to 200 and has #81 is out of
|
|
|
|
# window. this packet should be dropped.
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=200,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=81,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=201,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=202,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
# if anti-replay is off then we won't drop #81
|
|
|
|
n_rx = 3 if ar_on else 4
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if, n_rx=n_rx)
|
|
|
|
# this packet is one before the wrap
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=203,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
2022-11-18 04:24:09 +00:00
|
|
|
# a replayed packet, then an out of window, then a legit
|
|
|
|
# tests that a early failure on the batch doesn't affect subsequent packets.
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=203,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=81,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=204,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
]
|
|
|
|
n_rx = 1 if ar_on else 3
|
|
|
|
recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if, n_rx=n_rx)
|
|
|
|
|
2021-06-28 13:31:28 +00:00
|
|
|
# move the window over half way to a wrap
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=0x80000001,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# anti-replay will drop old packets, no anti-replay will not
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=0x44000001,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
if ar_on:
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts)
|
|
|
|
else:
|
|
|
|
recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
if esn_on:
|
|
|
|
#
|
|
|
|
# validate wrapping the ESN
|
|
|
|
#
|
|
|
|
|
|
|
|
# wrap scapy's TX SA SN
|
|
|
|
p.scapy_tra_sa.seq_num = 0x100000005
|
|
|
|
|
|
|
|
# send a packet that wraps the window for both AR and no AR
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x100000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
for rx in rxs:
|
|
|
|
decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
|
|
|
|
|
|
|
|
# move the window forward to half way to the next wrap
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x180000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# a packet less than 2^30 from the current position is:
|
|
|
|
# - AR: out of window and dropped
|
|
|
|
# - non-AR: accepted
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x170000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
if ar_on:
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts)
|
|
|
|
else:
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# a packet more than 2^30 from the current position is:
|
|
|
|
# - AR: out of window and dropped
|
|
|
|
# - non-AR: considered a wrap, but since it's not a wrap
|
|
|
|
# it won't decrpyt and so will be dropped
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x130000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts)
|
|
|
|
|
|
|
|
# a packet less than 2^30 from the current position and is a
|
|
|
|
# wrap; (the seq is currently at 0x180000005).
|
|
|
|
# - AR: out of window so considered a wrap, so accepted
|
|
|
|
# - non-AR: not considered a wrap, so won't decrypt
|
|
|
|
p.scapy_tra_sa.seq_num = 0x260000005
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x260000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
if ar_on:
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
else:
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts)
|
|
|
|
|
|
|
|
#
|
|
|
|
# window positions are different now for AR/non-AR
|
|
|
|
# move non-AR forward
|
|
|
|
#
|
|
|
|
if not ar_on:
|
|
|
|
# a packet more than 2^30 from the current position and is a
|
|
|
|
# wrap; (the seq is currently at 0x180000005).
|
|
|
|
# - AR: accepted
|
|
|
|
# - non-AR: not considered a wrap, so won't decrypt
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x200000005,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x200000006,
|
|
|
|
)
|
|
|
|
),
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
|
|
|
|
/ ICMP(),
|
|
|
|
seq_num=0x260000005,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
]
|
2021-06-28 13:31:28 +00:00
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
2019-07-17 15:07:14 +00:00
|
|
|
def verify_tra_anti_replay(self):
    """Exercise the RX anti-replay window and TX sequence-cycling logic
    for the IPv4 transport-mode SA pair.

    Walks the window through the Case A / Case B positions of RFC 4303
    Appendix A: replays, out-of-window packets, undecryptable packets,
    runt packets, and (with ESN enabled) high-sequence-number wraps,
    asserting the per-node and per-SA error counters after each step.
    """
    p = self.params[socket.AF_INET]
    # whether extended sequence numbers are in use on the VPP RX SA
    esn_en = p.vpp_tra_sa.esn_en
    anti_replay_window_size = p.anti_replay_window_size

    # snapshot the relevant error counters so the assertions below can be
    # made relative to whatever earlier tests left behind
    seq_cycle_node_name = "/err/%s/seq_cycled" % self.tra4_encrypt_node_name
    replay_count = self.get_replay_counts(p)
    initial_sa_node_replay_diff = replay_count - p.tra_sa_in.get_err("replay")
    hash_failed_count = self.get_hash_failed_counts(p)
    seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
    # NOTE(review): this initial diff reads "seq_cycled" from tra_sa_in,
    # but the later assertion in the non-ESN branch reads it from
    # tra_sa_out (seq_cycled is a TX/encrypt error) — confirm intended.
    initial_sa_node_cycled_diff = seq_cycle_count - p.tra_sa_in.get_err(
        "seq_cycled"
    )
    hash_err = "integ_error"

    if ESP == self.encryption_type:
        undersize_node_name = "/err/%s/runt" % self.tra4_decrypt_node_name[0]
        undersize_count = self.statistics.get_err_counter(undersize_node_name)
        initial_sa_node_undersize_diff = undersize_count - p.tra_sa_in.get_err(
            "runt"
        )
        # For AES-GCM an error in the hash is reported as a decryption failure
        if p.crypt_algo in ("AES-GCM", "AES-NULL-GMAC"):
            hash_err = "decryption_failed"
        # In async mode, we don't report errors in the hash.
        if p.async_mode:
            hash_err = ""
        else:
            initial_sa_node_hash_diff = hash_failed_count - p.tra_sa_in.get_err(
                hash_err
            )

    #
    # send packets with seq numbers 1->34
    # this means the window size is still in Case B (see RFC4303
    # Appendix A)
    #
    # for reasons i haven't investigated Scapy won't create a packet with
    # seq_num=0
    #
    pkts = [
        (
            Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
            / p.scapy_tra_sa.encrypt(
                IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
                seq_num=seq,
            )
        )
        for seq in range(1, 34)
    ]
    recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)

    # replayed packets are dropped
    self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
    replay_count += len(pkts)
    self.assertEqual(self.get_replay_counts(p), replay_count)
    err = p.tra_sa_in.get_err("replay") + initial_sa_node_replay_diff
    self.assertEqual(err, replay_count)

    #
    # now send a batch of packets all with the same sequence number
    # the first packet in the batch is legitimate, the rest bogus
    #
    self.vapi.cli("clear error")
    self.vapi.cli("clear node counters")
    pkts = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=35,
    )
    # 8 copies sent, only the first is accepted
    recv_pkts = self.send_and_expect(self.tra_if, pkts * 8, self.tra_if, n_rx=1)
    replay_count += 7
    self.assertEqual(self.get_replay_counts(p), replay_count)
    err = p.tra_sa_in.get_err("replay") + initial_sa_node_replay_diff
    self.assertEqual(err, replay_count)

    #
    # now move the window over to anti_replay_window_size + 100 and into Case A
    #
    self.vapi.cli("clear error")
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=anti_replay_window_size + 100,
    )
    recv_pkts = self.send_and_expect(self.tra_if, [pkt], self.tra_if)

    self.logger.info(self.vapi.ppcli("show ipsec sa 1"))

    # replayed packets are dropped
    self.send_and_assert_no_replies(self.tra_if, pkt * 3, timeout=0.2)
    replay_count += 3
    self.assertEqual(self.get_replay_counts(p), replay_count)
    err = p.tra_sa_in.get_err("replay") + initial_sa_node_replay_diff
    self.assertEqual(err, replay_count)

    # the window size is anti_replay_window_size packets
    # in window are still accepted
    # NOTE(review): this seq=200 packet is built but never sent here — it
    # is overwritten by the bogus-SA packet below; confirm whether a
    # send_and_expect was intended.
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=200,
    )

    # a packet that does not decrypt does not move the window forward
    # (keys reversed -> integrity/decrypt failure on RX)
    bogus_sa = SecurityAssociation(
        self.encryption_type,
        p.scapy_tra_spi,
        crypt_algo=p.crypt_algo,
        crypt_key=mk_scapy_crypt_key(p)[::-1],
        auth_algo=p.auth_algo,
        auth_key=p.auth_key[::-1],
    )
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / bogus_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=anti_replay_window_size + 200,
    )
    self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)

    hash_failed_count += 17
    self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
    if hash_err != "":
        err = p.tra_sa_in.get_err(hash_err) + initial_sa_node_hash_diff
        self.assertEqual(err, hash_failed_count)

    # a malformed 'runt' packet
    # created by a mis-constructed SA
    if ESP == self.encryption_type and p.crypt_algo != "NULL":
        bogus_sa = SecurityAssociation(self.encryption_type, p.scapy_tra_spi)
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / bogus_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=anti_replay_window_size + 200,
        )
        self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)

        undersize_count += 17
        self.assert_error_counter_equal(undersize_node_name, undersize_count)
        err = p.tra_sa_in.get_err("runt") + initial_sa_node_undersize_diff
        self.assertEqual(err, undersize_count)

    # which we can determine since this packet is still in the window
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=234,
    )
    self.send_and_expect(self.tra_if, [pkt], self.tra_if)

    #
    # out of window are dropped
    # this is Case B. So VPP will consider this to be a high seq num wrap
    # and so the decrypt attempt will fail
    #
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=17,
    )
    self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)

    if esn_en:
        # an out of window error with ESN looks like a high sequence
        # wrap. but since it isn't then the verify will fail.
        hash_failed_count += 17
        self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
        if hash_err != "":
            err = p.tra_sa_in.get_err(hash_err) + initial_sa_node_hash_diff
            self.assertEqual(err, hash_failed_count)

    else:
        # without ESN an out-of-window packet is a plain replay drop
        replay_count += 17
        self.assertEqual(self.get_replay_counts(p), replay_count)
        err = p.tra_sa_in.get_err("replay") + initial_sa_node_replay_diff
        self.assertEqual(err, replay_count)

    # valid packet moves the window over to anti_replay_window_size + 258
    pkt = Ether(
        src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
    ) / p.scapy_tra_sa.encrypt(
        IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
        seq_num=anti_replay_window_size + 258,
    )
    rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
    decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

    #
    # move VPP's SA TX seq-num to just before the seq-number wrap.
    # then fire in a packet that VPP should drop on TX because it
    # causes the TX seq number to wrap; unless we're using extened sequence
    # numbers.
    #
    self.vapi.cli("test ipsec sa %d seq 0xffffffff" % p.vpp_tra_sa_id)
    self.logger.info(self.vapi.ppcli("show ipsec sa 0"))
    self.logger.info(self.vapi.ppcli("show ipsec sa 1"))

    pkts = [
        (
            Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
            / p.scapy_tra_sa.encrypt(
                IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
                seq_num=seq,
            )
        )
        for seq in range(259, 280)
    ]

    if esn_en:
        # with ESN the TX wrap is legal and all packets make it through
        rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)

        #
        # in order for scapy to decrypt its SA's high order number needs
        # to wrap
        #
        p.vpp_tra_sa.seq_num = 0x100000000
        for rx in rxs:
            decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

        #
        # wrap scapy's TX high sequence number. VPP is in case B, so it
        # will consider this a high seq wrap also.
        # The low seq num we set it to will place VPP's RX window in Case A
        #
        p.scapy_tra_sa.seq_num = 0x100000005
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=0x100000005,
        )
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)

        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

        #
        # A packet that has seq num between (2^32-anti_replay_window_size)+4 and 5 is within
        # the window
        #
        p.scapy_tra_sa.seq_num = 0xFFFFFFFD
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=0xFFFFFFFD,
        )
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

        #
        # While in case A we cannot wrap the high sequence number again
        # because VPP will consider this packet to be one that moves the
        # window forward
        #
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=0x200000999,
        )
        self.send_and_assert_no_replies(
            self.tra_if, [pkt], self.tra_if, timeout=0.2
        )

        hash_failed_count += 1
        self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
        if hash_err != "":
            err = p.tra_sa_in.get_err(hash_err) + initial_sa_node_hash_diff
            self.assertEqual(err, hash_failed_count)

        #
        # but if we move the window forward to case B, then we can wrap
        # again
        #
        p.scapy_tra_sa.seq_num = 0x100000000 + anti_replay_window_size + 0x555
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=p.scapy_tra_sa.seq_num,
        )
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

        p.scapy_tra_sa.seq_num = 0x200000444
        pkt = Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=0x200000444,
        )
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])

    else:
        #
        # without ESN TX sequence numbers can't wrap and packets are
        # dropped from here on out.
        #
        self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
        seq_cycle_count += len(pkts)
        self.assert_error_counter_equal(seq_cycle_node_name, seq_cycle_count)
        err = p.tra_sa_out.get_err("seq_cycled") + initial_sa_node_cycled_diff
        self.assertEqual(err, seq_cycle_count)

    # move the security-associations seq number on to the last we used
    self.vapi.cli("test ipsec sa %d seq 0x15f" % p.scapy_tra_sa_id)
    p.scapy_tra_sa.seq_num = 351
    p.vpp_tra_sa.seq_num = 351
|
2021-09-21 12:34:19 +00:00
|
|
|
def verify_tra_lost(self):
    """Verify the RX SA 'lost' error counter as gaps are introduced
    into the received ESP sequence-number space.

    Losses are only accounted once the replay window has advanced past
    the initial sizeof(replay_window) sequence numbers, so early holes
    are not counted.
    """
    p = self.params[socket.AF_INET]
    esn_en = p.vpp_tra_sa.esn_en

    def enc(seq):
        # one Ether-framed, ESP-encrypted ICMP packet with the given
        # sequence number
        return Ether(
            src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
        ) / p.scapy_tra_sa.encrypt(
            IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
            seq_num=seq,
        )

    # send seq numbers 1 and 2 (Scapy won't build a packet with
    # seq_num=0); the window is still in Case B (RFC4303 Appendix A)
    self.send_and_expect(self.tra_if, [enc(s) for s in range(1, 3)], self.tra_if)

    self.assertEqual(p.tra_sa_in.get_err("lost"), 0)

    # skip sequence number 3 — the hole is not yet counted
    self.send_and_expect(self.tra_if, [enc(s) for s in range(4, 6)], self.tra_if)

    self.assertEqual(p.tra_sa_in.get_err("lost"), 0)

    # lost packets are only counted once we get past the first
    # sizeof(replay_window) sequence numbers
    self.send_and_expect(self.tra_if, [enc(s) for s in range(6, 100)], self.tra_if)

    self.assertEqual(p.tra_sa_in.get_err("lost"), 1)

    # lots of holes: every other sequence number in [100, 200)
    self.send_and_expect(
        self.tra_if, [enc(s) for s in range(100, 200, 2)], self.tra_if, n_rx=50
    )

    self.send_and_expect(self.tra_if, [enc(s) for s in range(200, 300)], self.tra_if)

    self.assertEqual(p.tra_sa_in.get_err("lost"), 51)

    # a big hole in the sequence number space
    self.send_and_expect(self.tra_if, [enc(s) for s in range(400, 500)], self.tra_if)

    self.assertEqual(p.tra_sa_in.get_err("lost"), 151)
|
|
|
|
2020-02-04 09:36:04 +00:00
|
|
|
def verify_tra_basic4(self, count=1, payload_size=54):
    """ipsec v4 transport basic test

    Send *count* encrypted packets of *payload_size* bytes through the
    transport interface, verify they are decrypted/re-encrypted and
    forwarded intact, then check the per-SA packet/error counters and
    the encrypt/decrypt node counters.

    :param count: number of packets to send.
    :param payload_size: payload size of each generated packet.
    """
    self.vapi.cli("clear errors")
    self.vapi.cli("clear ipsec sa")
    try:
        p = self.params[socket.AF_INET]
        send_pkts = self.gen_encrypt_pkts(
            p,
            p.scapy_tra_sa,
            self.tra_if,
            src=self.tra_if.remote_ip4,
            dst=self.tra_if.local_ip4,
            count=count,
            payload_size=payload_size,
        )
        recv_pkts = self.send_and_expect(self.tra_if, send_pkts, self.tra_if)
        for rx in recv_pkts:
            # on-the-wire length must match the IP total-length field
            self.assertEqual(len(rx) - len(Ether()), rx[IP].len)
            self.assert_packet_checksums_valid(rx)
            try:
                decrypted = p.vpp_tra_sa.decrypt(rx[IP])
                self.assert_packet_checksums_valid(decrypted)
            # narrowed from a bare 'except:' so BaseException
            # (KeyboardInterrupt/SystemExit) propagates without being
            # logged as a packet failure
            except Exception:
                self.logger.debug(ppp("Unexpected packet:", rx))
                raise
    finally:
        # always dump state to aid debugging, pass or fail
        self.logger.info(self.vapi.ppcli("show error"))
        self.logger.info(self.vapi.ppcli("show ipsec all"))

    # every packet must be accounted for on both SAs
    pkts = p.tra_sa_in.get_stats()["packets"]
    self.assertEqual(
        pkts, count, "incorrect SA in counts: expected %d != %d" % (count, pkts)
    )
    pkts = p.tra_sa_out.get_stats()["packets"]
    self.assertEqual(
        pkts, count, "incorrect SA out counts: expected %d != %d" % (count, pkts)
    )
    # no sequence gaps expected in a clean run
    self.assertEqual(p.tra_sa_out.get_err("lost"), 0)
    self.assertEqual(p.tra_sa_in.get_err("lost"), 0)

    self.assert_packet_counter_equal(self.tra4_encrypt_node_name, count)
    self.assert_packet_counter_equal(self.tra4_decrypt_node_name[0], count)
|
2022-12-22 11:26:57 +00:00
|
|
|
def _verify_tra_anti_replay_algorithm_esn(self):
|
|
|
|
def seq_num(seqh, seql):
|
|
|
|
return (seqh << 32) | (seql & 0xFFFF_FFFF)
|
|
|
|
|
|
|
|
p = self.params[socket.AF_INET]
|
|
|
|
anti_replay_window_size = p.anti_replay_window_size
|
|
|
|
|
|
|
|
seq_cycle_node_name = "/err/%s/seq_cycled" % self.tra4_encrypt_node_name
|
|
|
|
replay_count = self.get_replay_counts(p)
|
|
|
|
hash_failed_count = self.get_hash_failed_counts(p)
|
|
|
|
seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
|
|
|
|
|
|
|
|
if ESP == self.encryption_type:
|
|
|
|
undersize_node_name = "/err/%s/runt" % self.tra4_decrypt_node_name[0]
|
|
|
|
undersize_count = self.statistics.get_err_counter(undersize_node_name)
|
|
|
|
|
|
|
|
# reset the TX SA to avoid conflict with left configuration
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.vpp_tra_sa_id} seq 0x0")
|
|
|
|
|
|
|
|
"""
|
|
|
|
RFC 4303 Appendix A2. Case A
|
|
|
|
|
|
|
|
|: new Th marker
|
|
|
|
a-i: possible seq num received
|
|
|
|
+: Bl, Tl, Bl', Tl'
|
|
|
|
[BT]l(sign) = [BT]l (sign) 2^32 mod 2^32 (Th inc/dec-remented by 1)
|
|
|
|
|
|
|
|
Th - 1 Th Th + 1
|
|
|
|
--|--a--+---b---+-c--|--d--+---e---+-f--|--g--+---h---+--i-|--
|
|
|
|
========= ========= =========
|
|
|
|
Bl- Tl- Bl Tl Bl+ Tl+
|
|
|
|
|
|
|
|
Case A implies Tl >= W - 1
|
|
|
|
"""
|
|
|
|
|
|
|
|
Th = 1
|
|
|
|
Tl = anti_replay_window_size + 40
|
|
|
|
Bl = Tl - anti_replay_window_size + 1
|
|
|
|
|
|
|
|
# move VPP's RX AR window to Case A
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Th, Tl):#x}")
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th, Tl)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case a: Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet wrap the window
|
|
|
|
-> Seqh = Th + 1
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, Bl - 20), seq_num(Th - 1, Bl - 5))
|
|
|
|
]
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case b: Bl <= Seql <= Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Tl - 10), seq_num(Th, Tl - 5))
|
|
|
|
]
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th - 1, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, Tl - 35), seq_num(Th - 1, Tl - 5))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# some packets are rejected by the pre-crypto check
|
|
|
|
replay_count += 5
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts) - 5
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case c: Seql > Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet does not wrap the window
|
|
|
|
-> Seqh = Th
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, Tl + 5), seq_num(Th - 1, Tl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case d: Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet wrap the window
|
|
|
|
-> Seqh = Th + 1
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Bl - 20), seq_num(Th, Bl - 5))
|
|
|
|
]
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case e: Bl <= Seql <= Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Bl + 10), seq_num(Th, Bl + 30))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case f: Seql > Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet does not wrap the window
|
|
|
|
-> Seqh = Th
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> AR window shift (the window stays Case A)
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Tl + 50), seq_num(Th, Tl + 60))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case g: Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet wrap the window
|
|
|
|
-> Seqh = Th + 1
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> AR window shift (may set the window in Case B)
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th + 1, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
# set the window in Case B (the minimum window size is 64
|
|
|
|
# so we are sure to overlap)
|
|
|
|
for seq in range(seq_num(Th + 1, 10), seq_num(Th + 1, 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# reset the VPP's RX AR window to Case A
|
|
|
|
Th = 1
|
|
|
|
Tl = 2 * anti_replay_window_size + 40
|
|
|
|
Bl = Tl - anti_replay_window_size + 1
|
|
|
|
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Th, Tl):#x}")
|
|
|
|
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th + 1, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
# the AR will stay in Case A
|
|
|
|
for seq in range(
|
|
|
|
seq_num(Th + 1, anti_replay_window_size + 10),
|
|
|
|
seq_num(Th + 1, anti_replay_window_size + 20),
|
|
|
|
)
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case h: Bl <= Seql <= Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: the wrap is not detected, should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
Th += 1
|
|
|
|
Tl = anti_replay_window_size + 20
|
|
|
|
Bl = Tl - anti_replay_window_size + 1
|
|
|
|
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th + 1, Tl)
|
|
|
|
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th + 1, Tl - 20), seq_num(Th + 1, Tl - 5))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# some packets are rejected by the pre-crypto check
|
|
|
|
replay_count += 5
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts) - 5
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case i: Seql > Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet does not wrap the window
|
|
|
|
-> Seqh = Th
|
|
|
|
- integrity check: the wrap is not detected, shoud fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th + 1, Tl + 5), seq_num(Th + 1, Tl + 15))
|
|
|
|
]
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
RFC 4303 Appendix A2. Case B
|
|
|
|
|
|
|
|
Th - 1 Th Th + 1
|
|
|
|
----|-a-+-----b----+--c--|-d-+----e-----+--f--|-g-+--h---
|
|
|
|
========= =========== ===========
|
|
|
|
Tl- Bl Tl Bl+ Tl+
|
|
|
|
|
|
|
|
Case B implies Tl < W - 1
|
|
|
|
"""
|
|
|
|
|
|
|
|
# reset the VPP's RX AR window to Case B
|
|
|
|
Th = 2
|
|
|
|
Tl = 30 # minimum window size of 64, we are sure to overlap
|
|
|
|
Bl = (Tl - anti_replay_window_size + 1) % (1 << 32)
|
|
|
|
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Th, Tl):#x}")
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th, Tl)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case a: Seql <= Tl < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for replayed packet
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, 5), seq_num(Th, 10))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th - 1, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, 0), seq_num(Th - 1, 15))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# some packets are rejected by the pre-crypto check
|
|
|
|
replay_count += 5
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts) - 5
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case b: Tl < Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet will shift the window
|
|
|
|
-> Seqh = Th
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, Tl + 10), seq_num(Th - 1, Tl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case c: Tl < Bl <= Seql
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th - 1
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th - 1, Bl + 10), seq_num(Th - 1, Bl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case d: Seql <= Tl < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for replayed packet
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, 15), seq_num(Th, 25))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case e: Tl < Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> AR window shift (may set the window in Case A)
|
|
|
|
-> Seql is marked in the AR window
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Tl + 5), seq_num(Th, Tl + 15))
|
|
|
|
]
|
|
|
|
|
|
|
|
# the window stays in Case B
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(
|
|
|
|
seq_num(Th, Tl + anti_replay_window_size + 5),
|
|
|
|
seq_num(Th, Tl + anti_replay_window_size + 15),
|
|
|
|
)
|
|
|
|
]
|
|
|
|
|
|
|
|
# the window moves to Case A
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
# reset the VPP's RX AR window to Case B
|
|
|
|
Th = 2
|
|
|
|
Tl = 30 # minimum window size of 64, we are sure to overlap
|
|
|
|
Bl = (Tl - anti_replay_window_size + 1) % (1 << 32)
|
|
|
|
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Th, Tl):#x}")
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th, Tl)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case f: Tl < Bl <= Seql
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the previous window
|
|
|
|
-> Seqh = Th - 1
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, Bl + 10), seq_num(Th, Bl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case g: Seql <= Tl < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is the window
|
|
|
|
-> Seqh = Th
|
|
|
|
-> check for replayed packet
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th, 10), seq_num(Th, 15))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Th + 1, Tl)
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th + 1, 0), seq_num(Th + 1, 15))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# some packets are rejected by the pre-crypto check
|
|
|
|
replay_count += 5
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts) - 5
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case h: Tl < Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet will shift the window
|
|
|
|
-> Seqh = Th
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Th + 1, Tl + 10), seq_num(Th + 1, Tl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# out-of-window packets fail integrity check
|
|
|
|
hash_failed_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
|
|
|
|
|
|
|
|
def _verify_tra_anti_replay_algorithm_no_esn(self):
|
|
|
|
def seq_num(seql):
|
|
|
|
return seql & 0xFFFF_FFFF
|
|
|
|
|
|
|
|
p = self.params[socket.AF_INET]
|
|
|
|
anti_replay_window_size = p.anti_replay_window_size
|
|
|
|
|
|
|
|
seq_cycle_node_name = "/err/%s/seq_cycled" % self.tra4_encrypt_node_name
|
|
|
|
replay_count = self.get_replay_counts(p)
|
|
|
|
hash_failed_count = self.get_hash_failed_counts(p)
|
|
|
|
seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
|
|
|
|
|
|
|
|
if ESP == self.encryption_type:
|
|
|
|
undersize_node_name = "/err/%s/runt" % self.tra4_decrypt_node_name[0]
|
|
|
|
undersize_count = self.statistics.get_err_counter(undersize_node_name)
|
|
|
|
|
|
|
|
# reset the TX SA to avoid conflict with left configuration
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.vpp_tra_sa_id} seq 0x0")
|
|
|
|
|
|
|
|
"""
|
|
|
|
RFC 4303 Appendix A2. Case A
|
|
|
|
|
|
|
|
a-c: possible seq num received
|
|
|
|
+: Bl, Tl
|
|
|
|
|
|
|
|
|--a--+---b---+-c--|
|
|
|
|
=========
|
|
|
|
Bl Tl
|
|
|
|
|
|
|
|
No ESN implies Th = 0
|
|
|
|
Case A implies Tl >= W - 1
|
|
|
|
"""
|
|
|
|
|
|
|
|
Tl = anti_replay_window_size + 40
|
|
|
|
Bl = Tl - anti_replay_window_size + 1
|
|
|
|
|
|
|
|
# move VPP's RX AR window to Case A
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Tl):#x}")
|
|
|
|
p.scapy_tra_sa.seq_num = seq_num(Tl)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case a: Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is out of window
|
|
|
|
-> packet should be dropped
|
|
|
|
- integrity check: ...
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Bl - 20), seq_num(Bl - 5))
|
|
|
|
]
|
|
|
|
|
|
|
|
# out-of-window packets
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
replay_count += len(pkts)
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case b: Bl <= Seql <= Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check:
|
|
|
|
-> check for a replayed packet with Seql
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Tl - 50), seq_num(Tl - 30))
|
|
|
|
]
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Tl - 35), seq_num(Tl - 30))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
|
|
|
|
|
|
|
|
# replayed packets
|
|
|
|
replay_count += 5
|
|
|
|
self.assertEqual(self.get_replay_counts(p), replay_count)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case c: Seql > Tl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet will shift the window
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> AR window is shifted
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(Tl + 5), seq_num(Tl + 20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
RFC 4303 Appendix A2. Case B
|
|
|
|
|
|
|
|
|-a-----+------b-----|
|
|
|
|
=========
|
|
|
|
Tl
|
|
|
|
|
|
|
|
Case B implies Tl < W - 1
|
|
|
|
"""
|
|
|
|
|
|
|
|
# reset the VPP's RX AR window to Case B
|
|
|
|
Tl = 30 # minimum window size of 64, we are sure to overlap
|
|
|
|
Bl = seq_num(Tl - anti_replay_window_size + 1)
|
|
|
|
|
|
|
|
self.vapi.cli(f"test ipsec sa {p.scapy_tra_sa_id} seq {seq_num(Tl):#x}")
|
|
|
|
|
|
|
|
"""
|
|
|
|
case a: Seql <= Tl < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet is in the window
|
|
|
|
-> check for replayed packet
|
|
|
|
- integrity check: should fail
|
|
|
|
- post-crypto check: ...
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(5), seq_num(10))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
"""
|
|
|
|
case b: Tl < Seql < Bl
|
|
|
|
- pre-crypto check: algorithm predicts that the packet will shift the window
|
|
|
|
- integrity check: should pass
|
|
|
|
- post-crypto check: should pass
|
|
|
|
-> AR window is shifted
|
|
|
|
"""
|
|
|
|
pkts = [
|
|
|
|
(
|
|
|
|
Ether(src=self.tra_if.remote_mac, dst=self.tra_if.local_mac)
|
|
|
|
/ p.scapy_tra_sa.encrypt(
|
|
|
|
IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4) / ICMP(),
|
|
|
|
seq_num=seq,
|
|
|
|
)
|
|
|
|
)
|
|
|
|
for seq in range(seq_num(-50), seq_num(-20))
|
|
|
|
]
|
|
|
|
|
|
|
|
self.send_and_expect(self.tra_if, pkts, self.tra_if)
|
|
|
|
|
|
|
|
def verify_tra_anti_replay_algorithm(self):
|
|
|
|
if self.params[socket.AF_INET].vpp_tra_sa.esn_en:
|
|
|
|
self._verify_tra_anti_replay_algorithm_esn()
|
|
|
|
else:
|
|
|
|
self._verify_tra_anti_replay_algorithm_no_esn()
|
|
|
|
|
2019-04-10 12:39:10 +00:00
|
|
|
|
2024-03-11 10:38:46 +00:00
|
|
|
@unittest.skipIf(
    "ping" in config.excluded_plugins, "Exclude tests requiring Ping plugin"
)
class IpsecTra4Tests(IpsecTra4):
    """UT test methods for Transport v4"""

    def test_tra_anti_replay(self):
        """ipsec v4 transport anti-replay test"""
        self.verify_tra_anti_replay()

    def test_tra_anti_replay_algorithm(self):
        """ipsec v4 transport anti-replay algorithm test"""
        self.verify_tra_anti_replay_algorithm()

    def test_tra_lost(self):
        """ipsec v4 transport lost packet test"""
        self.verify_tra_lost()

    def test_tra_basic(self, count=1):
        """ipsec v4 transport basic test"""
        # forward the caller-supplied count (it was hard-coded to 1,
        # silently ignoring the parameter; default behavior is unchanged)
        self.verify_tra_basic4(count=count)

    def test_tra_burst(self):
        """ipsec v4 transport burst test"""
        self.verify_tra_basic4(count=257)
2019-02-25 14:32:02 +00:00
|
|
|
|
2019-04-10 12:39:10 +00:00
|
|
|
class IpsecTra6(object):
    """verify methods for Transport v6"""

    def verify_tra_basic6(self, count=1, payload_size=54):
        # Round-trip `count` ESP transport-mode packets over tra_if and
        # check decryption, IPv6 lengths and the SA/node counters.
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            p = self.params[socket.AF_INET6]
            # packets encrypted with the scapy peer SA; VPP decrypts on rx
            send_pkts = self.gen_encrypt_pkts6(
                p,
                p.scapy_tra_sa,
                self.tra_if,
                src=self.tra_if.remote_ip6,
                dst=self.tra_if.local_ip6,
                count=count,
                payload_size=payload_size,
            )
            recv_pkts = self.send_and_expect(self.tra_if, send_pkts, self.tra_if)
            for rx in recv_pkts:
                # plen must cover the whole frame minus Ether + IPv6 headers
                self.assertEqual(len(rx) - len(Ether()) - len(IPv6()), rx[IPv6].plen)
                try:
                    # what VPP re-encrypted must decrypt with the VPP-side SA
                    decrypted = p.vpp_tra_sa.decrypt(rx[IPv6])
                    self.assert_packet_checksums_valid(decrypted)
                except:
                    self.logger.debug(ppp("Unexpected packet:", rx))
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))

        # both SAs must account exactly `count` packets each
        pkts = p.tra_sa_in.get_stats()["packets"]
        self.assertEqual(
            pkts, count, "incorrect SA in counts: expected %d != %d" % (count, pkts)
        )
        pkts = p.tra_sa_out.get_stats()["packets"]
        self.assertEqual(
            pkts, count, "incorrect SA out counts: expected %d != %d" % (count, pkts)
        )
        self.assert_packet_counter_equal(self.tra6_encrypt_node_name, count)
        self.assert_packet_counter_equal(self.tra6_decrypt_node_name[0], count)

    def gen_encrypt_pkts_ext_hdrs6(
        self, sa, sw_intf, src, dst, count=1, payload_size=54
    ):
        # Build `count` ICMPv6 echo-request frames encrypted with *sa*.
        return [
            Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
            / sa.encrypt(
                IPv6(src=src, dst=dst)
                / ICMPv6EchoRequest(id=0, seq=1, data="X" * payload_size)
            )
            for i in range(count)
        ]

    def gen_pkts_ext_hdrs6(self, sw_intf, src, dst, count=1, payload_size=54):
        # Cleartext IPv6 packets carrying hop-by-hop + fragment extension
        # headers.  NOTE(review): payload_size is accepted but unused here;
        # the payload is a fixed 200 bytes.
        return [
            Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac)
            / IPv6(src=src, dst=dst)
            / IPv6ExtHdrHopByHop()
            / IPv6ExtHdrFragment(id=2, offset=0)
            / Raw(b"\xff" * 200)
            for i in range(count)
        ]

    def verify_tra_encrypted6(self, p, sa, rxs):
        # Decrypt each received packet with the VPP-side SA and check the
        # inner addresses; returns the decrypted packets for further checks.
        # NOTE(review): the `sa` parameter is unused; decryption always
        # uses p.vpp_tra_sa.
        decrypted = []
        for rx in rxs:
            self.assert_packet_checksums_valid(rx)
            try:
                decrypt_pkt = p.vpp_tra_sa.decrypt(rx[IPv6])
                decrypted.append(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.tra_if.local_ip6)
                self.assert_equal(decrypt_pkt.dst, self.tra_if.remote_ip6)
            except:
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    # decrypt_pkt may be unbound if decrypt itself raised
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise
        return decrypted

    def verify_tra_66_ext_hdrs(self, p):
        # 6-in-6 transport mode with IPv6 extension headers around ESP.
        count = 63

        #
        # check we can decrypt with options
        #
        tx = self.gen_encrypt_pkts_ext_hdrs6(
            p.scapy_tra_sa,
            self.tra_if,
            src=self.tra_if.remote_ip6,
            dst=self.tra_if.local_ip6,
            count=count,
        )
        self.send_and_expect(self.tra_if, tx, self.tra_if)

        #
        # injecting a packet from ourselves to be routed of box is a hack
        # but it matches an outbout policy, alors je ne regrette rien
        #

        # one extension before ESP
        tx = (
            Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac)
            / IPv6(src=self.tra_if.local_ip6, dst=self.tra_if.remote_ip6)
            / IPv6ExtHdrFragment(id=2, offset=0)
            / Raw(b"\xff" * 200)
        )

        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)

        for dc in dcs:
            # for reasons i'm not going to investigate scapy does not
            # created the correct headers after decrypt. but reparsing
            # the ipv6 packet fixes it
            dc = IPv6(raw(dc[IPv6]))
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)

        # two extensions before ESP
        tx = (
            Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac)
            / IPv6(src=self.tra_if.local_ip6, dst=self.tra_if.remote_ip6)
            / IPv6ExtHdrHopByHop()
            / IPv6ExtHdrFragment(id=2, offset=0)
            / Raw(b"\xff" * 200)
        )

        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)

        for dc in dcs:
            dc = IPv6(raw(dc[IPv6]))
            self.assertTrue(dc[IPv6ExtHdrHopByHop])
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)

        # two extensions before ESP, one after
        tx = (
            Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac)
            / IPv6(src=self.tra_if.local_ip6, dst=self.tra_if.remote_ip6)
            / IPv6ExtHdrHopByHop()
            / IPv6ExtHdrFragment(id=2, offset=0)
            / IPv6ExtHdrDestOpt()
            / Raw(b"\xff" * 200)
        )

        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)

        for dc in dcs:
            dc = IPv6(raw(dc[IPv6]))
            self.assertTrue(dc[IPv6ExtHdrDestOpt])
            self.assertTrue(dc[IPv6ExtHdrHopByHop])
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)
|
2024-03-11 10:38:46 +00:00
|
|
|
@unittest.skipIf(
    "ping" in config.excluded_plugins, "Exclude tests requiring Ping plugin"
)
class IpsecTra6Tests(IpsecTra6):
    """UT test methods for Transport v6"""

    def test_tra_basic6(self):
        """ipsec v6 transport basic test"""
        # single-packet round trip
        self.verify_tra_basic6(count=1)

    def test_tra_burst6(self):
        """ipsec v6 transport burst test"""
        # larger burst of packets
        self.verify_tra_basic6(count=257)
2018-06-24 22:49:55 +02:00
|
|
|
|
2019-12-20 00:54:57 +00:00
|
|
|
class IpsecTra6ExtTests(IpsecTra6):
    """UT test methods for Transport v6 with extension headers"""

    def test_tra_ext_hdrs_66(self):
        """ipsec 6o6 tra extension headers test"""
        params = self.params[socket.AF_INET6]
        self.verify_tra_66_ext_hdrs(params)
|
|
|
|
2019-02-25 14:32:02 +00:00
|
|
|
class IpsecTra46Tests(IpsecTra4Tests, IpsecTra6Tests):
    """UT test methods for Transport v6 and v4"""
|
|
|
|
2019-03-20 18:24:43 +00:00
|
|
|
class IpsecTun4(object):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""verify methods for Tunnel v4"""
|
|
|
|
|
2019-12-23 04:10:25 +00:00
|
|
|
def verify_counters4(self, p, count, n_frags=None, worker=None):
|
2019-05-16 14:34:55 +02:00
|
|
|
if not n_frags:
|
|
|
|
n_frags = count
|
2022-04-26 19:02:15 +02:00
|
|
|
if hasattr(p, "spd_policy_in_any"):
|
|
|
|
pkts = p.spd_policy_in_any.get_stats(worker)["packets"]
|
|
|
|
self.assertEqual(
|
|
|
|
pkts,
|
|
|
|
count,
|
|
|
|
"incorrect SPD any policy: expected %d != %d" % (count, pkts),
|
|
|
|
)
|
|
|
|
|
|
|
|
if hasattr(p, "tun_sa_in"):
|
|
|
|
pkts = p.tun_sa_in.get_stats(worker)["packets"]
|
|
|
|
self.assertEqual(
|
|
|
|
pkts, count, "incorrect SA in counts: expected %d != %d" % (count, pkts)
|
|
|
|
)
|
|
|
|
pkts = p.tun_sa_out.get_stats(worker)["packets"]
|
|
|
|
self.assertEqual(
|
|
|
|
pkts,
|
|
|
|
n_frags,
|
|
|
|
"incorrect SA out counts: expected %d != %d" % (count, pkts),
|
|
|
|
)
|
2019-03-27 13:40:35 +00:00
|
|
|
|
2019-05-16 14:34:55 +02:00
|
|
|
self.assert_packet_counter_equal(self.tun4_encrypt_node_name, n_frags)
|
2021-02-25 10:05:32 +00:00
|
|
|
self.assert_packet_counter_equal(self.tun4_decrypt_node_name[0], count)
|
2019-03-27 13:40:35 +00:00
|
|
|
|
2019-03-29 20:23:58 +00:00
|
|
|
def verify_decrypted(self, p, rxs):
|
|
|
|
for rx in rxs:
|
|
|
|
self.assert_equal(rx[IP].src, p.remote_tun_if_host)
|
|
|
|
self.assert_equal(rx[IP].dst, self.pg1.remote_ip4)
|
|
|
|
self.assert_packet_checksums_valid(rx)
|
|
|
|
|
2019-11-03 07:02:15 -05:00
|
|
|
def verify_esp_padding(self, sa, esp_payload, decrypt_pkt):
|
|
|
|
align = sa.crypt_algo.block_size
|
|
|
|
if align < 4:
|
|
|
|
align = 4
|
|
|
|
exp_len = (len(decrypt_pkt) + 2 + (align - 1)) & ~(align - 1)
|
|
|
|
exp_len += sa.crypt_algo.iv_size
|
|
|
|
exp_len += sa.crypt_algo.icv_size or sa.auth_algo.icv_size
|
|
|
|
self.assertEqual(exp_len, len(esp_payload))
|
|
|
|
|
2019-03-29 20:23:58 +00:00
|
|
|
    def verify_encrypted(self, p, sa, rxs):
        # Decrypt the encrypted packets received on the tunnel, check NAT-T
        # port, outer length, ESP padding, inner addresses; finally
        # reassemble any inner fragments and validate their checksums.
        decrypt_pkts = []
        for rx in rxs:
            # with NAT traversal configured the outer packet is UDP
            # encapsulated on the expected port
            if p.nat_header:
                self.assertEqual(rx[UDP].dport, p.nat_header.dport)
            self.assert_packet_checksums_valid(rx)
            self.assertEqual(len(rx) - len(Ether()), rx[IP].len)
            try:
                rx_ip = rx[IP]
                decrypt_pkt = p.vpp_tun_sa.decrypt(rx_ip)
                # scapy may hand back raw bytes; reparse as IP if needed
                if not decrypt_pkt.haslayer(IP):
                    decrypt_pkt = IP(decrypt_pkt[Raw].load)
                # padding check only applies to plain (non-UDP-encap) ESP
                if rx_ip.proto == socket.IPPROTO_ESP:
                    self.verify_esp_padding(sa, rx_ip[ESP].data, decrypt_pkt)
                decrypt_pkts.append(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip4)
                self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host)
            except:
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    # decrypt_pkt may be unbound if decrypt itself raised
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise
        # inner packets may be fragments; reassemble before checksum checks
        pkts = reassemble4(decrypt_pkts)
        for pkt in pkts:
            self.assert_packet_checksums_valid(pkt)
|
2019-03-28 08:56:10 +00:00
|
|
|
    def verify_tun_44(self, p, count=1, payload_size=64, n_rx=None):
        """Verify IPv4-in-IPv4 tunnel protection in both directions.

        Decrypt path: encrypted packets in on tun_if, cleartext out on pg1.
        Encrypt path: cleartext in on pg1, encrypted out on tun_if.
        n_rx overrides the expected rx packet count (presumably for cases
        where fragmentation changes it -- see verify_counters4's n_frags).
        """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec counters")
        self.vapi.cli("clear ipsec sa")
        if not n_rx:
            n_rx = count
        try:
            # inbound: packets encrypted with the scapy peer SA
            send_pkts = self.gen_encrypt_pkts(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip4,
                count=count,
                payload_size=payload_size,
            )
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            self.verify_decrypted(p, recv_pkts)

            # outbound: cleartext packets VPP should encrypt into the tunnel
            send_pkts = self.gen_pkts(
                self.pg1,
                src=self.pg1.remote_ip4,
                dst=p.remote_tun_if_host,
                count=count,
                payload_size=payload_size,
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if, n_rx)
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)

            # the outer header must carry the configured tunnel endpoints
            for rx in recv_pkts:
                self.assertEqual(rx[IP].src, p.tun_src)
                self.assertEqual(rx[IP].dst, p.tun_dst)

        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))

        self.logger.info(self.vapi.ppcli("show ipsec sa 0"))
        self.logger.info(self.vapi.ppcli("show ipsec sa 4"))
        self.verify_counters4(p, count, n_rx)
|
2019-12-16 00:53:11 +00:00
|
|
|
    def verify_tun_dropped_44(self, p, count=1, payload_size=64, n_rx=None):
        """Verify a 4-over-4 tunnel drops traffic in both directions.

        Used when the tunnel is expected to be non-functional (e.g. no
        protection installed): neither encrypted-in nor cleartext-out
        packets may produce any replies.

        :param p: tunnel test params
        :param count: number of packets per direction
        :param payload_size: inner payload size for the outbound leg
        :param n_rx: kept for signature parity with verify_tun_44
        """
        self.vapi.cli("clear errors")
        if not n_rx:
            n_rx = count
        try:
            # inbound encrypted packets must be dropped
            send_pkts = self.gen_encrypt_pkts(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip4,
                count=count,
            )
            self.send_and_assert_no_replies(self.tun_if, send_pkts)

            # outbound cleartext packets must be dropped too
            send_pkts = self.gen_pkts(
                self.pg1,
                src=self.pg1.remote_ip4,
                dst=p.remote_tun_if_host,
                count=count,
                payload_size=payload_size,
            )
            self.send_and_assert_no_replies(self.pg1, send_pkts)

        finally:
            # dump state for debugging regardless of outcome
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
|
|
|
|
|
2019-07-29 14:49:52 +00:00
|
|
|
    def verify_tun_reass_44(self, p):
        """4-over-4 tunnel test with IPv4 reassembly on the tunnel rx path.

        A single large encrypted packet is fragmented (RFC 791) before
        injection; VPP must reassemble it, decrypt, and forward one packet.
        Reassembly is enabled on entry and disabled again on success.
        """
        self.vapi.cli("clear errors")
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip4=True
        )

        try:
            # one oversized encrypted packet, split into 1400-byte fragments
            send_pkts = self.gen_encrypt_pkts(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip4,
                payload_size=1900,
                count=1,
            )
            send_pkts = fragment_rfc791(send_pkts[0], 1400)
            # all fragments in, exactly one reassembled+decrypted packet out
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1, n_rx=1)
            self.verify_decrypted(p, recv_pkts)

            # outbound direction is unaffected by rx reassembly
            send_pkts = self.gen_pkts(
                self.pg1, src=self.pg1.remote_ip4, dst=p.remote_tun_if_host, count=1
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)

        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))

        self.verify_counters4(p, 1, 1)
        # restore default: reassembly off (only reached on success)
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip4=False
        )
|
2019-07-29 14:49:52 +00:00
|
|
|
|
2019-03-27 13:40:35 +00:00
|
|
|
    def verify_tun_64(self, p, count=1):
        """Bidirectional 6-over-4 tunnel check (IPv6 inner, IPv4 outer).

        Inbound: IPv6 payloads encrypted by the peer SA arrive on the
        (IPv4) tunnel and must be forwarded to pg1 as IPv6.
        Outbound: IPv6 packets from pg1 must leave the tunnel as ESP in
        IPv4 and decrypt back to the original IPv6 packet.
        """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            send_pkts = self.gen_encrypt_pkts6(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host6,
                dst=self.pg1.remote_ip6,
                count=count,
            )
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            for recv_pkt in recv_pkts:
                # decrypted inner IPv6 addresses must be preserved
                self.assert_equal(recv_pkt[IPv6].src, p.remote_tun_if_host6)
                self.assert_equal(recv_pkt[IPv6].dst, self.pg1.remote_ip6)
                self.assert_packet_checksums_valid(recv_pkt)
            send_pkts = self.gen_pkts6(
                p,
                self.pg1,
                src=self.pg1.remote_ip6,
                dst=p.remote_tun_if_host6,
                count=count,
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            for recv_pkt in recv_pkts:
                try:
                    # outer header is IPv4; decrypt with VPP's tunnel SA
                    decrypt_pkt = p.vpp_tun_sa.decrypt(recv_pkt[IP])
                    if not decrypt_pkt.haslayer(IPv6):
                        # tunnel-mode decrypt may yield raw bytes; re-parse
                        decrypt_pkt = IPv6(decrypt_pkt[Raw].load)
                    self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip6)
                    self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host6)
                    self.assert_packet_checksums_valid(decrypt_pkt)
                except:
                    # log the offending packet(s) before re-raising
                    self.logger.error(ppp("Unexpected packet:", recv_pkt))
                    try:
                        self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                    except:
                        pass
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))

        # tunnel SAs are v4-outer here, so the v4 counter check applies
        self.verify_counters4(p, count)
|
2018-11-13 11:12:57 +01:00
|
|
|
|
2019-07-16 06:19:35 -07:00
|
|
|
    def verify_keepalive(self, p):
        """Verify NAT-T keepalive and runt handling on the UDP-encap path.

        Sends 31 copies each of: a valid keepalive (single 0xff byte),
        a too-short non-keepalive (0xfe, no padding), and a padded
        too-short packet; checks the per-node error counters.
        """
        # the sizeof Raw is calculated to pad to the minimum ethernet
        # frame size of 64 bytes
        # NOTE(review): Padding(0 * 21) passes the integer 0 (0*21 == 0),
        # not 21 padding bytes — presumably b"\x00" * 21 was intended;
        # confirm against the counter expectations before changing.
        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xff")
            / Padding(0 * 21)
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        # all 31 packets must be counted as NAT keepalives
        self.assert_error_counter_equal(
            "/err/%s/nat_keepalive" % self.tun4_input_node, 31
        )

        # a 1-byte non-keepalive payload is too short to be ESP
        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xfe")
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        self.assert_error_counter_equal("/err/%s/too_short" % self.tun4_input_node, 31)

        # same runt with padding — counter accumulates to 62 (31 + 31)
        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xfe")
            / Padding(0 * 21)
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        self.assert_error_counter_equal("/err/%s/too_short" % self.tun4_input_node, 62)
|
2022-01-10 11:21:17 +00:00
|
|
|
|
2019-03-20 18:24:43 +00:00
|
|
|
|
|
|
|
class IpsecTun4Tests(IpsecTun4):
    """UT test methods for Tunnel v4"""

    def test_tun_basic44(self):
        """ipsec 4o4 tunnel basic test"""
        # run once, bounce the tunnel interface, then run again to
        # confirm the tunnel survives an admin down/up cycle
        p = self.params[socket.AF_INET]
        self.verify_tun_44(p, count=1)
        self.tun_if.admin_down()
        self.tun_if.resolve_arp()
        self.tun_if.admin_up()
        self.verify_tun_44(p, count=1)

    def test_tun_reass_basic44(self):
        """ipsec 4o4 tunnel basic reassembly test"""
        p = self.params[socket.AF_INET]
        self.verify_tun_reass_44(p)

    def test_tun_burst44(self):
        """ipsec 4o4 tunnel burst test"""
        # a burst large enough to span several frames
        p = self.params[socket.AF_INET]
        self.verify_tun_44(p, count=127)
|
|
|
|
|
|
|
|
|
2019-03-20 18:24:43 +00:00
|
|
|
class IpsecTun6(object):
    """verify methods for Tunnel v6"""

    def verify_counters6(self, p_in, p_out, count, worker=None):
        """Check SA and node counters after a v6 tunnel exchange.

        :param p_in: params holding the inbound SA (optional attr tun_sa_in)
        :param p_out: params holding the outbound SA (optional attr tun_sa_out)
        :param count: expected packet count per direction
        :param worker: restrict SA stats to one worker thread (None = all)
        """
        if hasattr(p_in, "tun_sa_in"):
            pkts = p_in.tun_sa_in.get_stats(worker)["packets"]
            self.assertEqual(
                pkts, count, "incorrect SA in counts: expected %d != %d" % (count, pkts)
            )
        if hasattr(p_out, "tun_sa_out"):
            pkts = p_out.tun_sa_out.get_stats(worker)["packets"]
            self.assertEqual(
                pkts,
                count,
                "incorrect SA out counts: expected %d != %d" % (count, pkts),
            )
        self.assert_packet_counter_equal(self.tun6_encrypt_node_name, count)
        # decrypt node name is a list; the first entry is checked here
        self.assert_packet_counter_equal(self.tun6_decrypt_node_name[0], count)

    def verify_decrypted6(self, p, rxs):
        """Check decrypted inbound packets kept inner v6 addresses/checksums."""
        for rx in rxs:
            self.assert_equal(rx[IPv6].src, p.remote_tun_if_host)
            self.assert_equal(rx[IPv6].dst, self.pg1.remote_ip6)
            self.assert_packet_checksums_valid(rx)

    def verify_encrypted6(self, p, sa, rxs):
        """Check encrypted outbound packets: outer header fields and that
        they decrypt back to the original inner IPv6 packet."""
        for rx in rxs:
            self.assert_packet_checksums_valid(rx)
            # outer plen must match the actual frame length
            self.assertEqual(len(rx) - len(Ether()) - len(IPv6()), rx[IPv6].plen)
            self.assert_equal(rx[IPv6].hlim, p.outer_hop_limit)
            if p.outer_flow_label:
                self.assert_equal(rx[IPv6].fl, p.outer_flow_label)
            try:
                decrypt_pkt = p.vpp_tun_sa.decrypt(rx[IPv6])
                if not decrypt_pkt.haslayer(IPv6):
                    # tunnel-mode decrypt may yield raw bytes; re-parse
                    decrypt_pkt = IPv6(decrypt_pkt[Raw].load)
                self.assert_packet_checksums_valid(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip6)
                self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host)
                # inner hop limit decremented once by forwarding
                self.assert_equal(decrypt_pkt.hlim, p.inner_hop_limit - 1)
                self.assert_equal(decrypt_pkt.fl, p.inner_flow_label)
            except:
                # log offending packet(s) before re-raising
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise

    def verify_drop_tun_tx_66(self, p_in, count=1, payload_size=64):
        """Outbound (tx) traffic into an unprotected tunnel must be dropped."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")

        send_pkts = self.gen_pkts6(
            p_in,
            self.pg1,
            src=self.pg1.remote_ip6,
            dst=p_in.remote_tun_if_host,
            count=count,
            payload_size=payload_size,
        )
        self.send_and_assert_no_replies(self.tun_if, send_pkts)
        self.logger.info(self.vapi.cli("sh punt stats"))

    def verify_drop_tun_rx_66(self, p_in, count=1, payload_size=64):
        """Inbound (rx) encrypted traffic for an unprotected tunnel must drop."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")

        send_pkts = self.gen_encrypt_pkts6(
            p_in,
            p_in.scapy_tun_sa,
            self.tun_if,
            src=p_in.remote_tun_if_host,
            dst=self.pg1.remote_ip6,
            count=count,
        )
        self.send_and_assert_no_replies(self.tun_if, send_pkts)

    def verify_drop_tun_66(self, p_in, count=1, payload_size=64):
        """Both directions of an unprotected v6 tunnel must drop."""
        self.verify_drop_tun_tx_66(p_in, count=count, payload_size=payload_size)
        self.verify_drop_tun_rx_66(p_in, count=count, payload_size=payload_size)

    def verify_tun_66(self, p_in, p_out=None, count=1, payload_size=64):
        """Bidirectional 6-over-6 tunnel check.

        :param p_in: params for the inbound (decrypt) direction
        :param p_out: params for the outbound direction; defaults to p_in
        :param count: packets per direction
        :param payload_size: inner payload size in bytes
        """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        if not p_out:
            p_out = p_in
        try:
            # inbound: peer-encrypted packets must decrypt to pg1
            send_pkts = self.gen_encrypt_pkts6(
                p_in,
                p_in.scapy_tun_sa,
                self.tun_if,
                src=p_in.remote_tun_if_host,
                dst=self.pg1.remote_ip6,
                count=count,
                payload_size=payload_size,
            )
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            self.verify_decrypted6(p_in, recv_pkts)

            # outbound: cleartext from pg1 must leave encrypted
            send_pkts = self.gen_pkts6(
                p_in,
                self.pg1,
                src=self.pg1.remote_ip6,
                dst=p_out.remote_tun_if_host,
                count=count,
                payload_size=payload_size,
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            self.verify_encrypted6(p_out, p_out.vpp_tun_sa, recv_pkts)

            # outer header carries the configured tunnel endpoints
            for rx in recv_pkts:
                self.assertEqual(rx[IPv6].src, p_out.tun_src)
                self.assertEqual(rx[IPv6].dst, p_out.tun_dst)

        finally:
            # always dump state for post-mortem, even on failure
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters6(p_in, p_out, count)

    def verify_tun_reass_66(self, p):
        """6-over-6 tunnel test with IPv6 reassembly on the tunnel rx path.

        One large encrypted packet is fragmented (RFC 8200) before
        injection; VPP must reassemble, decrypt, and forward one packet.
        """
        self.vapi.cli("clear errors")
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip6=True
        )

        try:
            # one oversized encrypted packet, split into 1400-byte fragments
            send_pkts = self.gen_encrypt_pkts6(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip6,
                count=1,
                payload_size=1850,
            )
            send_pkts = fragment_rfc8200(send_pkts[0], 1, 1400, self.logger)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1, n_rx=1)
            self.verify_decrypted6(p, recv_pkts)

            # outbound direction is unaffected by rx reassembly
            send_pkts = self.gen_pkts6(
                p,
                self.pg1,
                src=self.pg1.remote_ip6,
                dst=p.remote_tun_if_host,
                count=1,
                payload_size=64,
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            self.verify_encrypted6(p, p.vpp_tun_sa, recv_pkts)
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters6(p, p, 1)
        # restore default: reassembly off (only reached on success)
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip6=False
        )

    def verify_tun_46(self, p, count=1):
        """ipsec 4o6 tunnel basic test"""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            # inbound: IPv4 inner over IPv6 outer, decrypted to pg1
            send_pkts = self.gen_encrypt_pkts(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host4,
                dst=self.pg1.remote_ip4,
                count=count,
            )
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            for recv_pkt in recv_pkts:
                self.assert_equal(recv_pkt[IP].src, p.remote_tun_if_host4)
                self.assert_equal(recv_pkt[IP].dst, self.pg1.remote_ip4)
                self.assert_packet_checksums_valid(recv_pkt)
            # outbound: cleartext IPv4 in, ESP-in-IPv6 out
            send_pkts = self.gen_pkts(
                self.pg1,
                src=self.pg1.remote_ip4,
                dst=p.remote_tun_if_host4,
                count=count,
            )
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            for recv_pkt in recv_pkts:
                try:
                    decrypt_pkt = p.vpp_tun_sa.decrypt(recv_pkt[IPv6])
                    if not decrypt_pkt.haslayer(IP):
                        # tunnel-mode decrypt may yield raw bytes; re-parse
                        decrypt_pkt = IP(decrypt_pkt[Raw].load)
                    self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip4)
                    self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host4)
                    self.assert_packet_checksums_valid(decrypt_pkt)
                except:
                    self.logger.debug(ppp("Unexpected packet:", recv_pkt))
                    try:
                        self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                    except:
                        pass
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        # outer is v6, so the v6 counters apply
        self.verify_counters6(p, p, count)

    def verify_keepalive(self, p):
        """Verify NAT-T keepalive and runt handling on the v6 UDP-encap path.

        Same scheme as the v4 variant: 31 valid keepalives (0xff), then
        two batches of too-short non-keepalives (0xfe) whose error counter
        accumulates to 31 and then 62.
        """
        # the sizeof Raw is calculated to pad to the minimum ethernet
        # frame size of 64 bytes
        # NOTE(review): Padding(0 * 1) / Padding(0 * 21) pass the integer 0,
        # not padding bytes — presumably b"\x00" * N was intended; confirm
        # against the counter expectations before changing.
        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IPv6(src=p.remote_tun_if_host, dst=self.tun_if.local_ip6)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xff")
            / Padding(0 * 1)
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        self.assert_error_counter_equal(
            "/err/%s/nat_keepalive" % self.tun6_input_node, 31
        )

        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IPv6(src=p.remote_tun_if_host, dst=self.tun_if.local_ip6)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xfe")
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        self.assert_error_counter_equal("/err/%s/too_short" % self.tun6_input_node, 31)

        pkt = (
            Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac)
            / IPv6(src=p.remote_tun_if_host, dst=self.tun_if.local_ip6)
            / UDP(sport=333, dport=4500)
            / Raw(b"\xfe")
            / Padding(0 * 21)
        )
        self.send_and_assert_no_replies(self.tun_if, pkt * 31)
        # counter accumulates across batches: 31 + 31 = 62
        self.assert_error_counter_equal("/err/%s/too_short" % self.tun6_input_node, 62)
|
|
|
|
|
2019-03-20 18:24:43 +00:00
|
|
|
|
|
|
|
class IpsecTun6Tests(IpsecTun6):
    """UT test methods for Tunnel v6"""

    def test_tun_basic66(self):
        """ipsec 6o6 tunnel basic test"""
        p = self.params[socket.AF_INET6]
        self.verify_tun_66(p, count=1)

    def test_tun_reass_basic66(self):
        """ipsec 6o6 tunnel basic reassembly test"""
        p = self.params[socket.AF_INET6]
        self.verify_tun_reass_66(p)

    def test_tun_burst66(self):
        """ipsec 6o6 tunnel burst test"""
        # a burst large enough to span several frames
        p = self.params[socket.AF_INET6]
        self.verify_tun_66(p, count=257)
|
2018-09-26 11:19:00 +02:00
|
|
|
|
|
|
|
|
2019-12-23 04:10:25 +00:00
|
|
|
class IpsecTun6HandoffTests(IpsecTun6):
    """UT test methods for Tunnel v6 with multiple workers"""

    # run VPP with two worker threads so hand-off can occur
    vpp_worker_count = 2

    def test_tun_handoff_66(self):
        """ipsec 6o6 tunnel worker hand-off test"""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")

        N_PKTS = 15
        p = self.params[socket.AF_INET6]

        # inject alternately on worker 0 and 1. all counts on the SA
        # should be against worker 0
        for worker in [0, 1, 0, 1]:
            send_pkts = self.gen_encrypt_pkts6(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip6,
                count=N_PKTS,
            )
            recv_pkts = self.send_and_expect(
                self.tun_if, send_pkts, self.pg1, worker=worker
            )
            self.verify_decrypted6(p, recv_pkts)

            send_pkts = self.gen_pkts6(
                p,
                self.pg1,
                src=self.pg1.remote_ip6,
                dst=p.remote_tun_if_host,
                count=N_PKTS,
            )
            recv_pkts = self.send_and_expect(
                self.pg1, send_pkts, self.tun_if, worker=worker
            )
            self.verify_encrypted6(p, p.vpp_tun_sa, recv_pkts)

        # all counts against the first worker that was used
        self.verify_counters6(p, p, 4 * N_PKTS, worker=0)
|
2019-12-23 04:10:25 +00:00
|
|
|
|
|
|
|
|
|
|
|
class IpsecTun4HandoffTests(IpsecTun4):
    """UT test methods for Tunnel v4 with multiple workers"""

    # run VPP with two worker threads so hand-off can occur
    vpp_worker_count = 2

    # NOTE(review): "handooff" is a typo, but the name is the unittest
    # discovery/selection interface, so it is kept as-is
    def test_tun_handooff_44(self):
        """ipsec 4o4 tunnel worker hand-off test"""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")

        N_PKTS = 15
        p = self.params[socket.AF_INET]

        # inject alternately on worker 0 and 1. all counts on the SA
        # should be against worker 0
        for worker in [0, 1, 0, 1]:
            send_pkts = self.gen_encrypt_pkts(
                p,
                p.scapy_tun_sa,
                self.tun_if,
                src=p.remote_tun_if_host,
                dst=self.pg1.remote_ip4,
                count=N_PKTS,
            )
            recv_pkts = self.send_and_expect(
                self.tun_if, send_pkts, self.pg1, worker=worker
            )
            self.verify_decrypted(p, recv_pkts)

            send_pkts = self.gen_pkts(
                self.pg1,
                src=self.pg1.remote_ip4,
                dst=p.remote_tun_if_host,
                count=N_PKTS,
            )
            recv_pkts = self.send_and_expect(
                self.pg1, send_pkts, self.tun_if, worker=worker
            )
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)

        # all counts against the first worker that was used
        self.verify_counters4(p, 4 * N_PKTS, worker=0)
|
2019-12-23 04:10:25 +00:00
|
|
|
|
|
|
|
|
2019-02-25 14:32:02 +00:00
|
|
|
class IpsecTun46Tests(IpsecTun4Tests, IpsecTun6Tests):
    """UT test methods for Tunnel v6 & v4"""

    # combined suite: inherits every test from both the v4 and v6
    # test mixins; no additional members needed
|
|
|
|
|
2018-06-24 22:49:55 +02:00
|
|
|
|
2021-10-26 10:05:58 -05:00
|
|
|
class IPSecIPv4Fwd(VppTestCase):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Test IPSec by capturing and verifying IPv4 forwarded pkts"""
|
|
|
|
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
@classmethod
def setUpConstants(cls):
    """Set up class-level test constants.

    Delegates to the framework base class; kept as an explicit hook so
    subclasses of IPSecIPv4Fwd have a single place to extend constants.
    """
    super(IPSecIPv4Fwd, cls).setUpConstants()
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
|
|
|
|
def setUp(self):
    """Per-test setup.

    Runs the framework setup, then initializes the bookkeeping lists that
    tearDown() uses to undo SPD configuration added during the test.
    """
    super(IPSecIPv4Fwd, self).setUp()
    # track created SPDs/bindings and policies so tearDown can remove them
    self.spd_objs, self.spd_policies = [], []
|
|
|
|
|
|
|
|
def tearDown(self):
    """Per-test cleanup.

    Removes SPD policies first, then SPD objects (interface bindings
    before the SPDs themselves, i.e. reverse creation order), and finally
    unconfigures and downs the packet-generator interfaces.
    """
    # policies go first, in creation order
    for policy in self.spd_policies:
        policy.remove_vpp_config()
    self.spd_policies = []

    # then bindings/SPDs, newest first
    for spd_obj in reversed(self.spd_objs):
        spd_obj.remove_vpp_config()
    self.spd_objs = []

    # finally shut down the pg interfaces
    for pg in self.pg_interfaces:
        pg.unconfig_ip4()
        pg.admin_down()
    super(IPSecIPv4Fwd, self).tearDown()
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
|
|
|
|
def create_interfaces(self, num_ifs=2):
    """Create pg0..pg<num_ifs-1>, bring them up with IPv4 and resolve ARP.

    :param num_ifs: number of packet-generator interfaces to create
    """
    self.create_pg_interfaces(range(num_ifs))
    for pg in self.pg_interfaces:
        pg.admin_up()  # bring the interface up
        pg.config_ip4()  # assign an IPv4 address
        pg.resolve_arp()  # learn VPP's MAC for this interface
    self.logger.info(self.vapi.ppcli("show int addr"))
|
|
|
|
|
|
|
|
def spd_create_and_intf_add(self, spd_id, pg_list):
    """Create an SPD with the given id and bind it to each listed interface.

    The SPD and each binding are recorded in self.spd_objs so tearDown()
    can remove them.

    :param spd_id: numeric id of the SPD to create
    :param pg_list: iterable of pg interfaces to bind the SPD to
    """
    spd = VppIpsecSpd(self, spd_id)
    spd.add_vpp_config()
    self.spd_objs.append(spd)
    for pg in pg_list:
        binding = VppIpsecSpdItfBinding(self, spd, pg)
        binding.add_vpp_config()
        self.spd_objs.append(binding)
|
|
|
|
|
|
|
|
def get_policy(self, policy_type):
    """Map a policy type name to the corresponding VPP SPD action enum.

    :param policy_type: one of "protect", "bypass" or "discard"
    :returns: matching member of VppEnum.vl_api_ipsec_spd_action_t
    :raises Exception: if policy_type is not a known action name
    """
    e = VppEnum.vl_api_ipsec_spd_action_t
    actions = {
        "protect": e.IPSEC_API_SPD_ACTION_PROTECT,
        "bypass": e.IPSEC_API_SPD_ACTION_BYPASS,
        "discard": e.IPSEC_API_SPD_ACTION_DISCARD,
    }
    try:
        return actions[policy_type]
    except KeyError:
        # bug fix: the original passed ("...: %s", policy_type) as two
        # arguments to Exception, so the message was a tuple and the
        # policy type was never interpolated; format explicitly instead.
        raise Exception("Invalid policy type: %s" % policy_type) from None
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def spd_add_rem_policy(
    self,
    spd_id,
    src_if,
    dst_if,
    proto,
    is_out,
    priority,
    policy_type,
    remove=False,
    all_ips=False,
    ip_range=False,
    local_ip_start=ip_address("0.0.0.0"),
    local_ip_stop=ip_address("255.255.255.255"),
    remote_ip_start=ip_address("0.0.0.0"),
    remote_ip_stop=ip_address("255.255.255.255"),
    remote_port_start=0,
    remote_port_stop=65535,
    local_port_start=0,
    local_port_stop=65535,
):
    """Add or remove an SPD policy entry and return it.

    The address match is chosen from one of three modes:
      * all_ips   -- wildcard match on any IPv4 address
      * ip_range  -- explicit ranges supplied by the caller
      * default   -- single-host match on the peer addresses of src_if/dst_if

    Added entries are recorded in self.spd_policies; removed entries are
    taken out of that list again.

    :param spd_id: id of the SPD the entry belongs to
    :param src_if: source pg interface (used in default mode)
    :param dst_if: destination pg interface (used in default mode)
    :param proto: IP protocol number to match
    :param is_out: True for an outbound policy
    :param priority: policy priority
    :param policy_type: "protect", "bypass" or "discard"
    :param remove: when True, remove the entry instead of adding it
    :returns: the VppIpsecSpdEntry that was added or removed
    """
    spd = VppIpsecSpd(self, spd_id)

    # select the address ranges for the policy match
    if all_ips:
        # wildcard: any source / any destination
        src_range_low = ip_address("0.0.0.0")
        src_range_high = ip_address("255.255.255.255")
        dst_range_low = ip_address("0.0.0.0")
        dst_range_high = ip_address("255.255.255.255")
    elif ip_range:
        # caller-supplied explicit ranges
        src_range_low, src_range_high = local_ip_start, local_ip_stop
        dst_range_low, dst_range_high = remote_ip_start, remote_ip_stop
    else:
        # single-host match on the interfaces' peer addresses
        src_range_low = src_range_high = src_if.remote_ip4
        dst_range_low = dst_range_high = dst_if.remote_ip4

    spdEntry = VppIpsecSpdEntry(
        self,
        spd,
        0,
        src_range_low,
        src_range_high,
        dst_range_low,
        dst_range_high,
        proto,
        priority=priority,
        policy=self.get_policy(policy_type),
        is_outbound=is_out,
        remote_port_start=remote_port_start,
        remote_port_stop=remote_port_stop,
        local_port_start=local_port_start,
        local_port_stop=local_port_stop,
    )

    # NOTE: the check is intentionally `is False` (not truthiness) to
    # preserve the original semantics for non-bool values of `remove`.
    if remove is False:
        spdEntry.add_vpp_config()
        self.spd_policies.append(spdEntry)
    else:
        spdEntry.remove_vpp_config()
        # relies on VppIpsecSpdEntry equality to find the recorded entry
        self.spd_policies.remove(spdEntry)
    self.logger.info(self.vapi.ppcli("show ipsec all"))
    return spdEntry
|
|
|
|
|
2023-10-20 05:20:47 +00:00
|
|
|
def create_stream(
|
|
|
|
self, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=5678, proto="UDP"
|
|
|
|
):
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
packets = []
|
2023-10-20 05:20:47 +00:00
|
|
|
# create SA
|
|
|
|
sa = SecurityAssociation(
|
|
|
|
ESP,
|
|
|
|
spi=1000,
|
|
|
|
crypt_algo="AES-CBC",
|
|
|
|
crypt_key=b"JPjyOWBeVEQiMe7h",
|
|
|
|
auth_algo="HMAC-SHA1-96",
|
|
|
|
auth_key=b"C91KUR9GYMm5GfkEvNjX",
|
|
|
|
tunnel_header=IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4),
|
|
|
|
nat_t_header=UDP(sport=src_prt, dport=dst_prt),
|
|
|
|
)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
for i in range(pkt_count):
|
|
|
|
# create packet info stored in the test case instance
|
|
|
|
info = self.create_packet_info(src_if, dst_if)
|
|
|
|
# convert the info into packet payload
|
|
|
|
payload = self.info_to_payload(info)
|
|
|
|
# create the packet itself
|
2023-10-20 05:20:47 +00:00
|
|
|
p = []
|
|
|
|
if proto == "UDP-ESP":
|
|
|
|
p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) / sa.encrypt(
|
|
|
|
IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4)
|
|
|
|
/ UDP(sport=src_prt, dport=dst_prt)
|
|
|
|
/ Raw(payload)
|
|
|
|
)
|
|
|
|
elif proto == "UDP":
|
|
|
|
p = (
|
|
|
|
Ether(dst=src_if.local_mac, src=src_if.remote_mac)
|
|
|
|
/ IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4)
|
|
|
|
/ UDP(sport=src_prt, dport=dst_prt)
|
|
|
|
/ Raw(payload)
|
|
|
|
)
|
|
|
|
elif proto == "TCP":
|
|
|
|
p = (
|
|
|
|
Ether(dst=src_if.local_mac, src=src_if.remote_mac)
|
|
|
|
/ IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4)
|
|
|
|
/ TCP(sport=src_prt, dport=dst_prt)
|
|
|
|
/ Raw(payload)
|
|
|
|
)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
# store a copy of the packet in the packet info
|
|
|
|
info.data = p.copy()
|
|
|
|
# append the packet to the list
|
|
|
|
packets.append(p)
|
|
|
|
# return the created packet list
|
|
|
|
return packets
|
|
|
|
|
|
|
|
def verify_capture(self, src_if, dst_if, capture):
    """Verify captured packet stream.

    :param src_if: interface the packets were sent from
    :param dst_if: interface the packets were captured on
    :param capture: list of captured packets to verify

    Each captured packet is matched (via the payload-embedded packet
    info) against the copy saved at creation time; raises on any
    mismatch, on an unexpected packet, or if a saved packet never
    arrived.
    """
    packet_info = None
    for packet in capture:
        try:
            ip = packet[IP]
            udp = packet[UDP]
            # convert the payload to packet info object
            payload_info = self.payload_to_info(packet)
            # make sure the indexes match
            self.assert_equal(
                payload_info.src, src_if.sw_if_index, "source sw_if_index"
            )
            self.assert_equal(
                payload_info.dst, dst_if.sw_if_index, "destination sw_if_index"
            )
            packet_info = self.get_next_packet_info_for_interface2(
                src_if.sw_if_index, dst_if.sw_if_index, packet_info
            )
            # make sure we didn't run out of saved packets
            self.assertIsNotNone(packet_info)
            self.assert_equal(
                payload_info.index, packet_info.index, "packet info index"
            )
            saved_packet = packet_info.data  # fetch the saved packet
            # assert the values match
            self.assert_equal(ip.src, saved_packet[IP].src, "IP source address")
            # ... more assertions here
            self.assert_equal(udp.sport, saved_packet[UDP].sport, "UDP source port")
        except Exception:
            # log the offending packet before re-raising so the test
            # failure output shows what actually arrived
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
    # all captured packets consumed; there must be no saved packet left
    remaining_packet = self.get_next_packet_info_for_interface2(
        src_if.sw_if_index, dst_if.sw_if_index, packet_info
    )
    self.assertIsNone(
        remaining_packet,
        "Interface %s: Packet expected from interface "
        "%s didn't arrive" % (dst_if.name, src_if.name),
    )
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
|
|
|
|
def verify_policy_match(self, pkt_count, spdEntry):
    """Assert that a SPD policy entry matched exactly *pkt_count* packets.

    :param pkt_count: expected number of packets matched by the policy
    :param spdEntry: SPD entry object whose counters are read via
        ``get_stats()`` (expected to expose a ``"packets"`` counter)
    :raises: test failure if the matched-packet counter differs from
        *pkt_count*
    """
    # NOTE: removed a leftover "XXXX ..." debug log line that duplicated
    # the info log below.
    matched_pkts = spdEntry.get_stats().get("packets")
    # Lazy %-style args so formatting cost is only paid when logged.
    self.logger.info("Policy %s matched: %d pkts", str(spdEntry), matched_pkts)
    self.assert_equal(pkt_count, matched_pkts)
|
|
|
|
|
2023-10-20 05:20:47 +00:00
|
|
|
# Method verify_l3_l4_capture() verifies only the network and transport
# layer fields of each packet: sa.encrypt() produces garbage interface
# numbers, so interface validation would fail (scapy bug?). Our intent
# is to verify the IP layer and above, and that is covered.
|
|
|
|
|
|
|
|
def verify_l3_l4_capture(
    self, src_if, dst_if, capture, tcp_port_in=1234, udp_port_in=5678
):
    """Verify L3/L4 fields of each decrypted packet in *capture*.

    Checks checksums, IP source/destination addresses, and that no
    unexpected (inner) UDP header survived decryption; for plain ICMP
    packets the ICMP id is compared against ``self.icmp_id_in``.

    :param src_if: interface the plaintext traffic originated from
        (its ``remote_ip4`` is the expected IP source)
    :param dst_if: interface the decrypted traffic is destined to
        (its ``remote_ip4`` is the expected IP destination)
    :param capture: list of scapy packets to validate
    :param tcp_port_in: expected inner TCP port
    :param udp_port_in: expected inner UDP port
    :raises: re-raises any assertion/parsing error after logging the
        offending packet

    NOTE(review): tcp_port_in/udp_port_in are not referenced in this
    body — confirm whether port checks were intended here.
    """
    for packet in capture:
        try:
            self.assert_packet_checksums_valid(packet)
            self.assert_equal(
                packet[IP].src,
                src_if.remote_ip4,
                "decrypted packet source address",
            )
            self.assert_equal(
                packet[IP].dst,
                dst_if.remote_ip4,
                "decrypted packet destination address",
            )
            if packet.haslayer(TCP):
                # A TCP packet must not also carry a UDP header.
                self.assertFalse(
                    packet.haslayer(UDP),
                    "unexpected UDP header in decrypted packet",
                )
            elif packet.haslayer(UDP):
                if packet[UDP].payload:
                    # packet[UDP][1] is the UDP payload; a nested UDP
                    # header there would indicate bogus decryption.
                    self.assertFalse(
                        packet[UDP][1].haslayer(UDP),
                        "unexpected UDP header in decrypted packet",
                    )
            else:
                # Neither TCP nor UDP: expect a plain ICMP packet.
                self.assertFalse(
                    packet.haslayer(UDP),
                    "unexpected UDP header in decrypted packet",
                )
                self.assert_equal(
                    packet[ICMP].id, self.icmp_id_in, "decrypted packet ICMP ID"
                )
        except Exception:
            # Log the raw packet for diagnosis, then let the test fail.
            self.logger.error(ppp("Unexpected or invalid plain packet:", packet))
            raise
|
|
|
|
|
2021-10-26 10:05:58 -05:00
|
|
|
|
|
|
|
class SpdFlowCacheTemplate(IPSecIPv4Fwd):
    """Common scaffolding for SPD flow cache test cases.

    Derived classes customize :meth:`setUpConstants` to enable the flow
    cache through VPP startup configuration.
    """

    @classmethod
    def setUpConstants(cls):
        super().setUpConstants()
        # Subclasses extend the VPP command line here, for example:
        #   cls.vpp_cmdline.extend(
        #       ["ipsec", "{", "ipv4-outbound-spd-flow-cache on", "}"]
        #   )
        #   cls.logger.info(
        #       "VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline)
        #   )

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()
|
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search, with an O(1) hash table
search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using a hash table without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, the old entry
will be overwritten by the new entry. Worst case is when all the
256 packets in a batch result in collision, falling back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows,
but is configurable by a startup.conf option.
4. Whenever a SPD rule is added/deleted by the control plane, all
current flow cache entries will be invalidated. As the SPD API is
not mp-safe, the data plane will wait for the control plane
operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
Performance with 1 core, 1 ESP Tunnel, null-decrypt then bypass,
94B (null encrypted packet) for different SPD policy matching indices:
SPD Policy index : 2 10 100 1000
Throughput : Mbps/Mbps Mbps/Mbps Mbps/Mbps Mbps/Mbps
(Baseline/Optimized)
ARM TX2 : 300/290 230/290 70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
2021-06-25 08:11:15 -05:00
|
|
|
def get_spd_flow_cache_entries(self, outbound):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""'show ipsec spd' output:
|
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search, with an O(1) hash table
search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using a hash table without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, the old entry
will be overwritten by the new entry. Worst case is when all the
256 packets in a batch result in collision, falling back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows,
but is configurable by a startup.conf option.
4. Whenever a SPD rule is added/deleted by the control plane, all
current flow cache entries will be invalidated. As the SPD API is
not mp-safe, the data plane will wait for the control plane
operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
Performance with 1 core, 1 ESP Tunnel, null-decrypt then bypass,
94B (null encrypted packet) for different SPD policy matching indices:
SPD Policy index : 2 10 100 1000
Throughput : Mbps/Mbps Mbps/Mbps Mbps/Mbps Mbps/Mbps
(Baseline/Optimized)
ARM TX2 : 300/290 230/290 70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
2021-06-25 08:11:15 -05:00
|
|
|
ipv4-inbound-spd-flow-cache-entries: 0
|
|
|
|
ipv4-outbound-spd-flow-cache-entries: 0
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
"""
|
|
|
|
show_ipsec_reply = self.vapi.cli("show ipsec spd")
|
|
|
|
# match the relevant section of 'show ipsec spd' output
|
2022-04-26 19:02:15 +02:00
|
|
|
if outbound:
|
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search, with an O(1) hash table
search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using a hash table without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, the old entry
will be overwritten by the new entry. Worst case is when all the
256 packets in a batch result in collision, falling back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows,
but is configurable by a startup.conf option.
4. Whenever a SPD rule is added/deleted by the control plane, all
current flow cache entries will be invalidated. As the SPD API is
not mp-safe, the data plane will wait for the control plane
operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
Performance with 1 core, 1 ESP Tunnel, null-decrypt then bypass,
94B (null encrypted packet) for different SPD policy matching indices:
SPD Policy index : 2 10 100 1000
Throughput : Mbps/Mbps Mbps/Mbps Mbps/Mbps Mbps/Mbps
(Baseline/Optimized)
ARM TX2 : 300/290 230/290 70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
2021-06-25 08:11:15 -05:00
|
|
|
regex_match = re.search(
|
2022-04-26 19:02:15 +02:00
|
|
|
"ipv4-outbound-spd-flow-cache-entries: (.*)",
|
|
|
|
show_ipsec_reply,
|
|
|
|
re.DOTALL,
|
|
|
|
)
|
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search, with an O(1) hash table
search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using a hash table without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, the old entry
will be overwritten by the new entry. Worst case is when all the
256 packets in a batch result in collision, falling back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows,
but is configurable by a startup.conf option.
4. Whenever a SPD rule is added/deleted by the control plane, all
current flow cache entries will be invalidated. As the SPD API is
not mp-safe, the data plane will wait for the control plane
operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
Performance with 1 core, 1 ESP Tunnel, null-decrypt then bypass,
94B (null encrypted packet) for different SPD policy matching indices:
SPD Policy index : 2 10 100 1000
Throughput : Mbps/Mbps Mbps/Mbps Mbps/Mbps Mbps/Mbps
(Baseline/Optimized)
ARM TX2 : 300/290 230/290 70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
2021-06-25 08:11:15 -05:00
|
|
|
else:
|
|
|
|
regex_match = re.search(
|
2022-04-26 19:02:15 +02:00
|
|
|
"ipv4-inbound-spd-flow-cache-entries: (.*)", show_ipsec_reply, re.DOTALL
|
|
|
|
)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
if regex_match is None:
|
2022-04-26 19:02:15 +02:00
|
|
|
raise Exception(
|
|
|
|
"Unable to find spd flow cache entries \
|
|
|
|
in 'show ipsec spd' CLI output - regex failed to match"
|
|
|
|
)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
else:
|
|
|
|
try:
|
|
|
|
num_entries = int(regex_match.group(1))
|
|
|
|
except ValueError:
|
2022-04-26 19:02:15 +02:00
|
|
|
raise Exception(
|
|
|
|
"Unable to get spd flow cache entries \
|
|
|
|
from 'show ipsec spd' string: %s",
|
|
|
|
regex_match.group(0),
|
|
|
|
)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
self.logger.info("%s", regex_match.group(0))
|
|
|
|
return num_entries
|
|
|
|
|
|
|
|
def verify_num_outbound_flow_cache_entries(self, expected_elements):
    """Assert the outbound SPD flow cache holds exactly
    *expected_elements* entries (parsed from 'show ipsec spd')."""
    actual = self.get_spd_flow_cache_entries(outbound=True)
    self.assertEqual(actual, expected_elements)
|
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search, with an O(1) hash table
search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using a hash table without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, the old entry
will be overwritten by the new entry. Worst case is when all the
256 packets in a batch result in collision, falling back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows,
but is configurable by a startup.conf option.
4. Whenever a SPD rule is added/deleted by the control plane, all
current flow cache entries will be invalidated. As the SPD API is
not mp-safe, the data plane will wait for the control plane
operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
Performance with 1 core, 1 ESP Tunnel, null-decrypt then bypass,
94B (null encrypted packet) for different SPD policy matching indices:
SPD Policy index : 2 10 100 1000
Throughput : Mbps/Mbps Mbps/Mbps Mbps/Mbps Mbps/Mbps
(Baseline/Optimized)
ARM TX2 : 300/290 230/290 70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
2021-06-25 08:11:15 -05:00
|
|
|
|
|
|
|
def verify_num_inbound_flow_cache_entries(self, expected_elements):
    """Assert the inbound SPD flow cache holds exactly
    *expected_elements* entries (parsed from 'show ipsec spd')."""
    actual = self.get_spd_flow_cache_entries(outbound=False)
    self.assertEqual(actual, expected_elements)
|
ipsec: Performance improvement of ipsec4_output_node using flow cache
Adding flow cache support to improve outbound IPv4/IPSec SPD lookup
performance. Details about flow cache:
Mechanism:
1. First packet of a flow will undergo linear search in SPD
table. Once a policy match is found, a new entry will be added
into the flow cache. From 2nd packet onwards, the policy lookup
will happen in flow cache.
2. The flow cache is implemented using bihash without collision
handling. This will avoid the logic to age out or recycle the old
flows in flow cache. Whenever a collision occurs, old entry will
be overwritten by the new entry. Worst case is when all the 256
packets in a batch result in collision and fall back to linear
search. Average and best case will be O(1).
3. The size of flow cache is fixed and decided based on the number
of flows to be supported. The default is set to 1 million flows.
This can be made as a configurable option as a next step.
4. Whenever a SPD rule is added/deleted by the control plane, the
flow cache entries will be completely deleted (reset) in the
control plane. The assumption here is that SPD rule add/del is not
a frequent operation from control plane. Flow cache reset is done,
by putting the data plane in fall back mode, to bypass flow cache
and do linear search till the SPD rule add/delete operation is
complete. Once the rule is successfully added/deleted, the data
plane will be allowed to make use of the flow cache. The flow
cache will be reset only after flushing out the inflight packets
from all the worker cores using
vlib_worker_wait_one_loop().
Details about bihash usage:
1. A new bihash template (16_8) is added to support IPv4 5 tuple.
BIHASH_KVP_PER_PAGE and BIHASH_KVP_AT_BUCKET_LEVEL are set
to 1 in the new template. It means only one KVP is supported
per bucket.
2. Collision handling is avoided by calling
BV (clib_bihash_add_or_overwrite_stale) function.
Through the stale callback function pointer, the KVP entry
will be overwritten during collision.
3. Flow cache reset is done using
BV (clib_bihash_foreach_key_value_pair) function.
Through the callback function pointer, the KVP value is reset
to ~0ULL.
MRR performance numbers with 1 core, 1 ESP Tunnel, null-encrypt,
64B for different SPD policy matching indices:
SPD Policy index : 1 10 100 1000
Throughput : MPPS/MPPS MPPS/MPPS MPPS/MPPS KPPS/MPPS
(Baseline/Optimized)
ARM Neoverse N1 : 5.2/4.84 4.55/4.84 2.11/4.84 329.5/4.84
ARM TX2 : 2.81/2.6 2.51/2.6 1.27/2.6 176.62/2.6
INTEL SKX : 4.93/4.48 4.29/4.46 2.05/4.48 336.79/4.47
Next Steps:
Following can be made as a configurable option through startup
conf at IPSec level:
1. Enable/Disable Flow cache.
2. Bihash configuration like number of buckets and memory size.
3. Dual/Quad loop unroll can be applied around bihash to further
improve the performance.
4. The same flow cache logic can be applied for IPv6 as well as in
IPSec inbound direction. A deeper and wider flow cache using
bihash_40_8 can replace existing bihash_16_8, to make it
common for both IPv4 and IPv6 in both outbound and
inbound directions.
Following changes are made based on the review comments:
1. ON/OFF flow cache through startup conf. Default: OFF
2. Flow cache stale entry detection using epoch counter.
3. Avoid host order endianness conversion during flow cache
lookup.
4. Move IPSec startup conf to a common file.
5. Added SPD flow cache unit test case
6. Replaced bihash with vectors to implement flow cache.
7. ipsec_add_del_policy API is not mpsafe. Cleaned up
inflight packets check in control plane.
Type: improvement
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I62b4d6625fbc6caf292427a5d2046aa5672b2006
2021-03-19 19:20:49 +00:00
|
|
|
|
|
|
|
def crc32_supported(self):
    """Return True when the host CPU advertises a hardware CRC32
    capability, False otherwise.

    Checks the feature/flag list reported by lscpu: "crc32" on AArch64,
    "sse4_2" on x86 (see vppinfra/crc32.h).
    """
    # lscpu is part of util-linux package, available on all Linux Distros
    stream = os.popen("lscpu")
    cpu_info = stream.read()
    # BUG FIX: the original condition was
    #     if "crc32" or "sse4_2" in cpu_info:
    # which always evaluates truthy (the non-empty literal "crc32" is the
    # left operand of `or`), so the method unconditionally returned True.
    # Each substring must be tested against cpu_info individually.
    if "crc32" in cpu_info or "sse4_2" in cpu_info:
        self.logger.info("\ncrc32 supported:\n" + cpu_info)
        return True
    else:
        self.logger.info("\ncrc32 NOT supported:\n" + cpu_info)
        return False
|
|
|
|
|
2023-10-20 05:20:47 +00:00
|
|
|
def create_stream(
    cls, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=4500, proto="UDP-ESP"
):
    """Build *pkt_count* packets from src_if to dst_if by delegating to
    the parent class; defaults target UDP-ESP on port 4500.

    Fix: removed the dead `packets = []` assignment that was immediately
    overwritten by the superclass call.
    """
    return super(SpdFlowCacheTemplate, cls).create_stream(
        src_if, dst_if, pkt_count, src_prt, dst_prt, proto
    )
|
|
|
|
|
|
|
|
def verify_capture(
    self, src_if, dst_if, capture, tcp_port_in=1234, udp_port_in=4500
):
    """Verify captured packets' L3/L4 fields via the parent class helper."""
    base = super(SpdFlowCacheTemplate, self)
    base.verify_l3_l4_capture(src_if, dst_if, capture, tcp_port_in, udp_port_in)
|
|
|
|
|
|
|
|
|
|
|
|
class SpdFastPathTemplate(IPSecIPv4Fwd):
    """Base template for SPD fast-path tests; subclasses enable the
    fast path by extending the VPP cmdline in setUpConstants."""

    @classmethod
    def setUpConstants(cls):
        super(SpdFastPathTemplate, cls).setUpConstants()
        # Override this method with required cmdline parameters e.g.
        # cls.vpp_cmdline.extend(["ipsec", "{",
        #                         "ipv4-outbound-spd-flow-cache on",
        #                         "}"])
        # cls.logger.info("VPP modified cmdline is %s" % " "
        #                 .join(cls.vpp_cmdline))

    def setUp(self):
        super(SpdFastPathTemplate, self).setUp()

    def tearDown(self):
        super(SpdFastPathTemplate, self).tearDown()

    def create_stream(
        cls, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=4500, proto="UDP-ESP"
    ):
        """Delegate packet stream creation to the parent class.

        Fix: removed the dead `packets = []` assignment that was
        immediately overwritten by the superclass call.
        """
        return super(SpdFastPathTemplate, cls).create_stream(
            src_if, dst_if, pkt_count, src_prt, dst_prt, proto
        )

    def verify_capture(
        self, src_if, dst_if, capture, tcp_port_in=1234, udp_port_in=4500
    ):
        """Verify captured packets' L3/L4 fields via the parent helper."""
        super(SpdFastPathTemplate, self).verify_l3_l4_capture(
            src_if, dst_if, capture, tcp_port_in, udp_port_in
        )
|
|
|
|
|
|
|
|
|
|
|
|
class IpsecDefaultTemplate(IPSecIPv4Fwd):
    """Base template for IPSec tests using VPP's default SPD
    configuration (no flow cache / fast path cmdline overrides)."""

    @classmethod
    def setUpConstants(cls):
        super(IpsecDefaultTemplate, cls).setUpConstants()

    def setUp(self):
        super(IpsecDefaultTemplate, self).setUp()

    def tearDown(self):
        super(IpsecDefaultTemplate, self).tearDown()

    def create_stream(
        cls, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=4500, proto="UDP-ESP"
    ):
        """Delegate packet stream creation to the parent class.

        Fix: removed the dead `packets = []` assignment that was
        immediately overwritten by the superclass call.
        """
        return super(IpsecDefaultTemplate, cls).create_stream(
            src_if, dst_if, pkt_count, src_prt, dst_prt, proto
        )

    def verify_capture(
        self, src_if, dst_if, capture, tcp_port_in=1234, udp_port_in=4500
    ):
        """Verify captured packets' L3/L4 fields via the parent helper."""
        super(IpsecDefaultTemplate, self).verify_l3_l4_capture(
            src_if, dst_if, capture, tcp_port_in, udp_port_in
        )
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
|
2022-07-08 12:45:51 +00:00
|
|
|
class IPSecIPv6Fwd(VppTestCase):
    """Test IPSec by capturing and verifying IPv6 forwarded pkts"""

    @classmethod
    def setUpConstants(cls):
        super(IPSecIPv6Fwd, cls).setUpConstants()

    def setUp(self):
        super(IPSecIPv6Fwd, self).setUp()
        # store SPD objects so we can remove configs on tear down
        self.spd_objs = []
        self.spd_policies = []

    def tearDown(self):
        # remove SPD policies
        for obj in self.spd_policies:
            obj.remove_vpp_config()
        self.spd_policies = []
        # remove SPD items (interface bindings first, then SPD)
        for obj in reversed(self.spd_objs):
            obj.remove_vpp_config()
        self.spd_objs = []
        # close down pg intfs
        for pg in self.pg_interfaces:
            pg.unconfig_ip6()
            pg.admin_down()
        super(IPSecIPv6Fwd, self).tearDown()

    def create_interfaces(self, num_ifs=2):
        """Bring up *num_ifs* pg interfaces with IPv6 addressing and
        resolved neighbors."""
        # create interfaces pg0 ... pg<num_ifs>
        self.create_pg_interfaces(range(num_ifs))
        for pg in self.pg_interfaces:
            # put the interface up
            pg.admin_up()
            # configure IPv6 address on the interface
            pg.config_ip6()
            pg.resolve_ndp()
        self.logger.info(self.vapi.ppcli("show int addr"))

    def spd_create_and_intf_add(self, spd_id, pg_list):
        """Create SPD *spd_id* in VPP and bind it to every interface in
        *pg_list*; created objects are tracked for teardown."""
        spd = VppIpsecSpd(self, spd_id)
        spd.add_vpp_config()
        self.spd_objs.append(spd)
        for pg in pg_list:
            spdItf = VppIpsecSpdItfBinding(self, spd, pg)
            spdItf.add_vpp_config()
            self.spd_objs.append(spdItf)

    def get_policy(self, policy_type):
        """Map a policy-type string ("protect"/"bypass"/"discard") to the
        corresponding VPP API enum value.

        :raises Exception: for any other string.
        """
        e = VppEnum.vl_api_ipsec_spd_action_t
        if policy_type == "protect":
            return e.IPSEC_API_SPD_ACTION_PROTECT
        elif policy_type == "bypass":
            return e.IPSEC_API_SPD_ACTION_BYPASS
        elif policy_type == "discard":
            return e.IPSEC_API_SPD_ACTION_DISCARD
        else:
            # BUG FIX: Exception("msg %s", arg) never interpolates the
            # argument; format the message explicitly instead.
            raise Exception("Invalid policy type: %s" % policy_type)

    def spd_add_rem_policy(
        self,
        spd_id,
        src_if,
        dst_if,
        proto,
        is_out,
        priority,
        policy_type,
        remove=False,
        all_ips=False,
        ip_range=False,
        local_ip_start=ip_address("0::0"),
        local_ip_stop=ip_address("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"),
        remote_ip_start=ip_address("0::0"),
        remote_ip_stop=ip_address("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"),
        remote_port_start=0,
        remote_port_stop=65535,
        local_port_start=0,
        local_port_stop=65535,
    ):
        """Add (default) or remove an SPD policy entry.

        Address selection, by precedence:
        - all_ips: match the whole IPv6 address space;
        - ip_range: use the explicit local_/remote_ ip start/stop bounds;
        - otherwise: match exactly the remote addresses of src_if/dst_if.

        :returns: the VppIpsecSpdEntry wrapper object.
        """
        # wrapper handle for an existing SPD; not configured in VPP here
        spd = VppIpsecSpd(self, spd_id)

        if all_ips:
            src_range_low = ip_address("0::0")
            src_range_high = ip_address("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
            dst_range_low = ip_address("0::0")
            dst_range_high = ip_address("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")

        elif ip_range:
            src_range_low = local_ip_start
            src_range_high = local_ip_stop
            dst_range_low = remote_ip_start
            dst_range_high = remote_ip_stop

        else:
            src_range_low = src_if.remote_ip6
            src_range_high = src_if.remote_ip6
            dst_range_low = dst_if.remote_ip6
            dst_range_high = dst_if.remote_ip6

        spdEntry = VppIpsecSpdEntry(
            self,
            spd,
            0,
            src_range_low,
            src_range_high,
            dst_range_low,
            dst_range_high,
            proto,
            priority=priority,
            policy=self.get_policy(policy_type),
            is_outbound=is_out,
            remote_port_start=remote_port_start,
            remote_port_stop=remote_port_stop,
            local_port_start=local_port_start,
            local_port_stop=local_port_stop,
        )

        if remove is False:
            spdEntry.add_vpp_config()
            self.spd_policies.append(spdEntry)
        else:
            spdEntry.remove_vpp_config()
            self.spd_policies.remove(spdEntry)
        self.logger.info(self.vapi.ppcli("show ipsec all"))
        return spdEntry

    def create_stream(self, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=5678):
        """Create *pkt_count* IPv6/UDP packets from src_if to dst_if with
        per-packet tracking payloads; returns the packet list."""
        packets = []
        for i in range(pkt_count):
            # create packet info stored in the test case instance
            info = self.create_packet_info(src_if, dst_if)
            # convert the info into packet payload
            payload = self.info_to_payload(info)
            # create the packet itself
            p = (
                Ether(dst=src_if.local_mac, src=src_if.remote_mac)
                / IPv6(src=src_if.remote_ip6, dst=dst_if.remote_ip6)
                / UDP(sport=src_prt, dport=dst_prt)
                / Raw(payload)
            )
            # store a copy of the packet in the packet info
            info.data = p.copy()
            # append the packet to the list
            packets.append(p)
        # return the created packet list
        return packets

    def verify_capture(self, src_if, dst_if, capture):
        """Verify every captured packet against the saved originals and
        assert that no expected packet is missing."""
        packet_info = None
        for packet in capture:
            try:
                ip = packet[IPv6]
                udp = packet[UDP]
                # convert the payload to packet info object
                payload_info = self.payload_to_info(packet)
                # make sure the indexes match
                self.assert_equal(
                    payload_info.src, src_if.sw_if_index, "source sw_if_index"
                )
                self.assert_equal(
                    payload_info.dst, dst_if.sw_if_index, "destination sw_if_index"
                )
                packet_info = self.get_next_packet_info_for_interface2(
                    src_if.sw_if_index, dst_if.sw_if_index, packet_info
                )
                # make sure we didn't run out of saved packets
                self.assertIsNotNone(packet_info)
                self.assert_equal(
                    payload_info.index, packet_info.index, "packet info index"
                )
                saved_packet = packet_info.data  # fetch the saved packet
                # assert the values match
                self.assert_equal(ip.src, saved_packet[IPv6].src, "IP source address")
                # ... more assertions here
                self.assert_equal(udp.sport, saved_packet[UDP].sport, "UDP source port")
            # FIX: the bound name `e` was unused; log and re-raise as before
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise
        remaining_packet = self.get_next_packet_info_for_interface2(
            src_if.sw_if_index, dst_if.sw_if_index, packet_info
        )
        self.assertIsNone(
            remaining_packet,
            "Interface %s: Packet expected from interface "
            "%s didn't arrive" % (dst_if.name, src_if.name),
        )

    def verify_policy_match(self, pkt_count, spdEntry):
        """Assert that *spdEntry* matched exactly *pkt_count* packets."""
        self.logger.info("XXXX %s %s", str(spdEntry), str(spdEntry.get_stats()))
        matched_pkts = spdEntry.get_stats().get("packets")
        self.logger.info("Policy %s matched: %d pkts", str(spdEntry), matched_pkts)
        self.assert_equal(pkt_count, matched_pkts)
|
|
|
|
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
# Allow running this test module directly with VPP's custom test runner.
if __name__ == "__main__":
    unittest.main(testRunner=VppTestRunner)
|