vlib: add virtual time support

Type: feature

Change-Id: Iabd76558e9c72ed8286cfeeb1fbaa4fde4832a90
Signed-off-by: Benoît Ganne <bganne@cisco.com>
This commit is contained in:
Benoît Ganne
2021-08-20 09:18:31 +02:00
committed by Damjan Marion
parent f33979ba88
commit 56eccdbaa9
14 changed files with 157 additions and 75 deletions
+2
View File
@@ -90,6 +90,7 @@ add_vpp_library(vlib
punt_node.c
threads.c
threads_cli.c
time.c
trace.c
unix/cli.c
unix/input.c
@@ -130,6 +131,7 @@ add_vpp_library(vlib
physmem.h
punt.h
threads.h
time.h
trace_funcs.h
trace.h
unix/mc_socket.h
+8 -14
View File
@@ -1637,7 +1637,6 @@ static clib_error_t *
show_clock_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
int i;
int verbose = 0;
clib_timebase_t _tb, *tb = &_tb;
@@ -1650,24 +1649,19 @@ show_clock_command_fn (vlib_main_t * vm,
verbose, format_clib_timebase_time,
clib_timebase_now (tb));
if (vlib_get_n_threads () == 1)
return 0;
vlib_cli_output (vm, "Time last barrier release %.9f",
vm->time_last_barrier_release);
for (i = 1; i < vlib_get_n_threads (); i++)
foreach_vlib_main ()
{
vlib_main_t *ovm = vlib_get_main_by_index (i);
if (ovm == 0)
continue;
vlib_cli_output (vm, "%d: %U", this_vlib_main->thread_index,
format_clib_time, &this_vlib_main->clib_time, verbose);
vlib_cli_output (vm, "%d: %U", i, format_clib_time, &ovm->clib_time,
verbose);
vlib_cli_output (
vm, "Thread %d offset %.9f error %.9f", i, ovm->time_offset,
vm->time_last_barrier_release - ovm->time_last_barrier_release);
vlib_cli_output (vm, "Thread %d offset %.9f error %.9f",
this_vlib_main->thread_index,
this_vlib_main->time_offset,
vm->time_last_barrier_release -
this_vlib_main->time_last_barrier_release);
}
return 0;
}
+84
View File
@@ -0,0 +1,84 @@
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2021 Cisco Systems, Inc.
*/
/* Virtual time allows adjusting the VPP clock by an arbitrary amount of
 * time. It is done such that the order of timer expirations is maintained,
 * and if a timer expiration callback reschedules another timer, this
 * timer will also properly expire in the right order. IOW, the order
 * of events is preserved.
 *
 * When moving time forward, each VPP thread (main and workers) runs an
 * instance of the input node 'virtual-time-input' below. This node is
 * responsible for advancing its own VPP thread clock to the next timer
 * expiration. IOW each thread will move its clock independently one
 * timer at a time. This also means that while moving time forward, each
 * thread might not have the exact same view of what 'now' means. Once
 * the main thread has finished moving its time forward, the worker thread
 * barrier will ensure the time between main and workers is synchronized.
 *
 * Using an input node in poll-mode has several advantages, including
 * preventing 'unix-epoll-input' from sleeping (as it will not sleep if at
 * least one polling node is active). */
#include <vlib/vlib.h>
#include <vlib/time.h>
static f64 vlib_time_virtual_stop;
static uword
vlib_time_virtual_input (vlib_main_t *vm, vlib_node_runtime_t *node,
			 vlib_frame_t *frame)
{
  /* Delay until this thread's next timer expiration. */
  const f64 step = vlib_time_get_next_timer (vm);

  /* Each thread advances its own clock one timer at a time. To keep a
   * fast thread from running away from the others, never move past the
   * requested stop time: once reached, stop polling on this thread. */
  if (vlib_time_now (vm) + step <= vlib_time_virtual_stop)
    vlib_time_adjust (vm, step);
  else
    vlib_node_set_state (vm, node->node_index, VLIB_NODE_STATE_DISABLED);

  return 0;
}
/* Poll-mode input node run per-thread to advance virtual time. Registered
 * disabled; it is switched to POLLING by the "set clock adjust" CLI command
 * and disables itself once the requested stop time is reached. */
VLIB_REGISTER_NODE (vlib_time_virtual_input_node) = {
.function = vlib_time_virtual_input,
.type = VLIB_NODE_TYPE_INPUT,
.name = "virtual-time-input",
.state = VLIB_NODE_STATE_DISABLED,
};
/* CLI handler for "set clock adjust <nn>": move VPP virtual time forward by
 * <nn> seconds on all threads. Parses the amount, arms the per-thread
 * 'virtual-time-input' nodes, lets them run, then resynchronizes clocks via
 * the worker barrier. Returns 0 on success, an error on bad input. */
static clib_error_t *
vlib_time_virtual_adjust_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
f64 val;
if (!unformat (input, "%f", &val))
return clib_error_create ("unknown input `%U'", format_unformat_error,
input);
/* Absolute stop time shared by all threads' virtual-time-input nodes. */
vlib_time_virtual_stop = vlib_time_now (vm) + val;
/* Enable the virtual-time input node on every thread (main and workers). */
foreach_vlib_main ()
vlib_node_set_state (this_vlib_main, vlib_time_virtual_input_node.index,
VLIB_NODE_STATE_POLLING);
/* Release the barrier so workers can advance their own clocks while the
 * main thread advances its clock in the wait loop below. */
vlib_worker_thread_barrier_release (vm);
/* Suspend this process until the main thread's (virtual) clock has moved
 * forward by the requested amount; each wakeup returns the remaining time. */
while ((val = vlib_process_wait_for_event_or_clock (vm, val)) >= 0.001)
;
/* this barrier sync will resynchronize all the clocks, so even if the main
* thread was faster than some workers, this will make sure the workers will
* disable their virtual-time-input node on their next iteration (as stop
* time is reached). If a worker is too slow, there is a slight chance
* several of its timers expire at the same time at this point. Time will
* tell... */
vlib_worker_thread_barrier_sync (vm);
return 0;
}
/* CLI command: "set clock adjust <nn>" — advance virtual time by <nn>
 * seconds (see vlib_time_virtual_adjust_command_fn above). */
VLIB_CLI_COMMAND (vlib_time_virtual_command) = {
.path = "set clock adjust",
.short_help = "set clock adjust <nn>",
.function = vlib_time_virtual_adjust_command_fn,
};
+26
View File
@@ -0,0 +1,26 @@
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2021 Cisco Systems, Inc.
*/
#ifndef included_vlib_time_h
#define included_vlib_time_h
#include <vlib/vlib.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
/* Return the delay, in seconds, until the first timer on this thread's
 * timing wheel expires (ticks until expiration times seconds-per-tick). */
static inline f64
vlib_time_get_next_timer (vlib_main_t *vm)
{
  TWT (tw_timer_wheel) *tw = vm->node_main.timing_wheel;
  return tw->timer_interval * TW (tw_timer_first_expires_in_ticks) (tw);
}
/* Shift this thread's clock forward by 'offset' seconds by accumulating
 * into the per-thread time offset (the virtual-time adjustment). */
static inline void
vlib_time_adjust (vlib_main_t *vm, f64 offset)
{
  vm->time_offset = vm->time_offset + offset;
}
#endif /* included_vlib_time_h */
+4
View File
@@ -1284,6 +1284,10 @@ class VppTestCase(CPUInterface, unittest.TestCase):
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
def virtual_sleep(self, timeout, remark=None):
self.logger.debug("Moving VPP time by %s (%s)", timeout, remark)
self.vapi.cli("set clock adjust %s" % timeout)
def pg_send(self, intf, pkts, worker=None, trace=True):
intf.add_stream(pkts, worker=worker)
self.pg_enable_capture(self.pg_interfaces)
+2 -9
View File
@@ -430,16 +430,9 @@ class TestCNatTranslation(CnatCommonTestCase):
# all disappear
#
self.vapi.cli("test cnat scanner on")
n_tries = 0
self.virtual_sleep(2)
sessions = self.vapi.cnat_session_dump()
while (len(sessions) and n_tries < 100):
n_tries += 1
sessions = self.vapi.cnat_session_dump()
self.sleep(2)
self.logger.info(self.vapi.cli("show cnat session verbose"))
self.assertTrue(n_tries < 100)
self.assertEqual(len(sessions), 0)
self.vapi.cli("test cnat scanner off")
#
+1 -4
View File
@@ -4,7 +4,6 @@ import socket
import struct
import unittest
import scapy.compat
from time import sleep
from framework import VppTestCase, running_extended_tests
from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
from scapy.layers.inet import IP, TCP, UDP, ICMP
@@ -577,7 +576,6 @@ class TestDET44(VppTestCase):
self.logger.error("TCP session termination failed")
raise
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_session_timeout(self):
""" Deterministic NAT session timeouts """
self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
@@ -599,7 +597,7 @@ class TestDET44(VppTestCase):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.get_capture(len(pkts))
sleep(15)
self.virtual_sleep(15)
dms = self.vapi.det44_map_dump()
self.assertEqual(0, dms[0].ses_num)
@@ -663,7 +661,6 @@ class TestDET44(VppTestCase):
# verify IPFIX logging
self.vapi.ipfix_flush()
sleep(1)
capture = self.pg2.get_capture(2)
ipfix = IPFIXDecoder()
# first load template
+3 -4
View File
@@ -486,7 +486,7 @@ class TestL2fib(VppTestCase):
self.vapi.want_l2_macs_events()
self.learn_hosts(bd1, hosts)
self.sleep(1)
self.virtual_sleep(1)
self.logger.info(self.vapi.ppcli("show l2fib"))
evs = self.vapi.collect_events()
action = VppEnum.vl_api_mac_event_action_t.MAC_EVENT_ACTION_API_ADD
@@ -509,7 +509,7 @@ class TestL2fib(VppTestCase):
self.sleep(1)
self.learn_hosts(bd1, hosts)
self.sleep(1)
self.virtual_sleep(1)
self.logger.info(self.vapi.ppcli("show l2fib"))
evs = self.vapi.collect_events()
action = VppEnum.vl_api_mac_event_action_t.MAC_EVENT_ACTION_API_ADD
@@ -560,7 +560,7 @@ class TestL2fib(VppTestCase):
self.sleep(1)
self.learn_hosts(bd1, hosts)
self.sleep(1)
self.virtual_sleep(1)
self.logger.info(self.vapi.ppcli("show l2fib"))
evs = self.vapi.collect_events()
self.vapi.want_l2_macs_events2(enable_disable=0)
@@ -577,6 +577,5 @@ class TestL2fib(VppTestCase):
self.assertLess(len(e), ev_macs * 10)
self.assertEqual(len(learned_macs ^ macs), 0)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
+6 -6
View File
@@ -2520,7 +2520,7 @@ class TestNAT44EDMW(TestNAT44ED):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.get_capture(len(pkts))
self.sleep(1.5, "wait for timeouts")
self.virtual_sleep(1.5, "wait for timeouts")
pkts = []
for i in range(0, self.max_sessions - 1):
@@ -2555,7 +2555,7 @@ class TestNAT44EDMW(TestNAT44ED):
self.pg_start()
self.pg1.get_capture(1)
self.sleep(6)
self.virtual_sleep(6)
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
@@ -3211,7 +3211,7 @@ class TestNAT44EDMW(TestNAT44ED):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.sleep(new_transitory, "wait for transitory timeout")
self.virtual_sleep(new_transitory, "wait for transitory timeout")
self.pg0.assert_nothing_captured(0)
# session should still exist
@@ -3330,7 +3330,7 @@ class TestNAT44EDMW(TestNAT44ED):
'/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
self.assertEqual(stats - in2out_drops, 1)
self.sleep(3)
self.virtual_sleep(3)
# extra ACK packet in -> out - this will cause session to be wiped
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
@@ -3432,7 +3432,7 @@ class TestNAT44EDMW(TestNAT44ED):
'/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
self.assertEqual(stats - in2out_drops, 1)
self.sleep(3)
self.virtual_sleep(3)
# extra ACK packet in -> out - this will cause session to be wiped
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
@@ -3542,7 +3542,7 @@ class TestNAT44EDMW(TestNAT44ED):
'/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
self.assertEqual(stats - in2out_drops, 1)
self.sleep(3)
self.virtual_sleep(3)
# extra ACK packet in -> out - this will cause session to be wiped
p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+1 -4
View File
@@ -6,7 +6,6 @@ import socket
import struct
import unittest
from io import BytesIO
from time import sleep
import scapy.compat
from framework import VppTestCase, VppTestRunner
@@ -2342,7 +2341,6 @@ class TestNAT44EI(MethodHolder):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured()
sleep(1)
self.vapi.ipfix_flush()
capture = self.pg3.get_capture(7)
ipfix = IPFIXDecoder()
@@ -2404,7 +2402,6 @@ class TestNAT44EI(MethodHolder):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured()
sleep(1)
self.vapi.ipfix_flush()
capture = self.pg3.get_capture(7)
ipfix = IPFIXDecoder()
@@ -3446,7 +3443,7 @@ class TestNAT44EI(MethodHolder):
# do not send ACK, active retry send HA event again
self.pg_enable_capture(self.pg_interfaces)
sleep(12)
self.virtual_sleep(12)
stats = self.statistics['/nat44-ei/ha/retry-count']
self.assertEqual(stats[:, 0].sum(), 3)
stats = self.statistics['/nat44-ei/ha/missed-count']
+1 -5
View File
@@ -6,7 +6,6 @@ import socket
import struct
import unittest
from io import BytesIO
from time import sleep
import scapy.compat
from framework import tag_fixme_vpp_workers
@@ -934,7 +933,6 @@ class TestNAT64(VppTestCase):
self.assertEqual(ses_num_end - ses_num_start, 3)
@unittest.skipUnless(running_extended_tests, "part of extended tests")
def test_session_timeout(self):
""" NAT64 session timeout """
self.icmp_id_in = 1234
@@ -959,7 +957,7 @@ class TestNAT64(VppTestCase):
ses_num_before_timeout = self.nat64_get_ses_num()
sleep(15)
self.virtual_sleep(15)
# ICMP and TCP session after timeout
ses_num_after_timeout = self.nat64_get_ses_num()
@@ -1700,7 +1698,6 @@ class TestNAT64(VppTestCase):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured()
sleep(1)
self.vapi.ipfix_flush()
capture = self.pg3.get_capture(7)
ipfix = IPFIXDecoder()
@@ -1728,7 +1725,6 @@ class TestNAT64(VppTestCase):
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured()
sleep(1)
self.vapi.ipfix_flush()
capture = self.pg3.get_capture(1)
# verify events in data set
-1
View File
@@ -6,7 +6,6 @@ import socket
import struct
import unittest
from io import BytesIO
from time import sleep
import scapy.compat
from framework import VppTestCase, VppTestRunner, running_extended_tests
+7 -16
View File
@@ -1999,16 +1999,6 @@ class NeighborAgeTestCase(VppTestCase):
i.unconfig_ip6()
i.admin_down()
def wait_for_no_nbr(self, intf, address,
n_tries=50, s_time=1):
while (n_tries):
if not find_nbr(self, intf, address):
return True
n_tries = n_tries - 1
self.sleep(s_time)
return False
def verify_arp_req(self, rx, smac, sip, dip):
ether = rx[Ether]
self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
@@ -2099,10 +2089,13 @@ class NeighborAgeTestCase(VppTestCase):
self.vapi.cli("sh ip4 neighbor-sorted")
# age out neighbors
self.virtual_sleep(3)
#
# expect probes from all these ARP entries as they age
# 3 probes for each neighbor 3*200 = 600
rxs = self.pg0.get_capture(600, timeout=8)
rxs = self.pg0.get_capture(600, timeout=2)
for ii in range(3):
for jj in range(200):
@@ -2113,9 +2106,7 @@ class NeighborAgeTestCase(VppTestCase):
# 3 probes sent then 1 more second to see if a reply comes, before
# they age out
#
for jj in range(1, 201):
self.wait_for_no_nbr(self.pg0.sw_if_index,
self.pg0.remote_hosts[jj].ip4)
self.virtual_sleep(1)
self.assertFalse(self.vapi.ip_neighbor_dump(sw_if_index=0xffffffff,
af=vaf.ADDRESS_IP4))
@@ -2142,7 +2133,7 @@ class NeighborAgeTestCase(VppTestCase):
self.assertEqual(e.neighbor.mac_address,
self.pg0.remote_hosts[ii].mac)
self.sleep(10)
self.virtual_sleep(10)
self.assertFalse(self.vapi.ip_neighbor_dump(sw_if_index=0xffffffff,
af=vaf.ADDRESS_IP4))
@@ -2190,7 +2181,7 @@ class NeighborAgeTestCase(VppTestCase):
max_age=0,
recycle=False)
self.sleep(10)
self.virtual_sleep(10)
self.assertTrue(find_nbr(self,
self.pg0.sw_if_index,
self.pg0.remote_hosts[0].ip4))
+12 -12
View File
@@ -57,7 +57,7 @@ class TestIPv4Reassembly(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10000)
@@ -480,7 +480,7 @@ Ethernet-Payload.IPv4-Packet.IPv4-Header.Fragment-Offset; Test-case: 5737'''
self.src_if.add_stream(fragments)
self.pg_start()
self.sleep(.25, "wait before sending rest of fragments")
self.virtual_sleep(.25, "wait before sending rest of fragments")
self.src_if.add_stream(fragments2)
self.pg_start()
@@ -539,7 +539,7 @@ class TestIPv4SVReassembly(VppTestCase):
max_reassembly_length=1000,
type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,
expire_walk_interval_ms=10)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(
timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
@@ -672,7 +672,7 @@ class TestIPv4SVReassembly(VppTestCase):
self.assertEqual(sent[Raw].payload, recvd[Raw].payload)
# wait for cleanup
self.sleep(.25, "wait before sending rest of fragments")
self.virtual_sleep(.25, "wait before sending rest of fragments")
# send rest of fragments - shouldn't be forwarded
self.pg_enable_capture()
@@ -825,7 +825,7 @@ class TestIPv4MWReassembly(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10000)
@@ -997,7 +997,7 @@ class TestIPv6Reassembly(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10, is_ip6=1)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10000, is_ip6=1)
@@ -1337,7 +1337,7 @@ class TestIPv6Reassembly(VppTestCase):
self.src_if.add_stream(fragments)
self.pg_start()
self.sleep(.25, "wait before sending rest of fragments")
self.virtual_sleep(.25, "wait before sending rest of fragments")
self.src_if.add_stream(fragments2)
self.pg_start()
@@ -1473,7 +1473,7 @@ class TestIPv6MWReassembly(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10, is_ip6=1)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=1000, is_ip6=1)
@@ -1638,7 +1638,7 @@ class TestIPv6SVReassembly(VppTestCase):
max_reassembly_length=1000,
type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,
expire_walk_interval_ms=10, is_ip6=1)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(
timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
@@ -1769,7 +1769,7 @@ class TestIPv6SVReassembly(VppTestCase):
self.assertEqual(sent[Raw].payload, recvd[Raw].payload)
# wait for cleanup
self.sleep(.25, "wait before sending rest of fragments")
self.virtual_sleep(.25, "wait before sending rest of fragments")
# send rest of fragments - shouldn't be forwarded
self.pg_enable_capture()
@@ -1845,7 +1845,7 @@ class TestIPv4ReassemblyLocalNode(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10000)
@@ -1981,7 +1981,7 @@ class TestFIFReassembly(VppTestCase):
self.vapi.ip_reassembly_set(timeout_ms=0, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10, is_ip6=1)
self.sleep(.25)
self.virtual_sleep(.25)
self.vapi.ip_reassembly_set(timeout_ms=1000000, max_reassemblies=1000,
max_reassembly_length=1000,
expire_walk_interval_ms=10000)