Compare commits
39 commits: master...stable/2310
095a953070
401b53d939
6b287b5301
f9c322be7d
b75bde18c4
a56e75fd71
da5ddd1714
bec4f4a7ab
b8b02937b1
0d7d22cf67
6cc757eff7
9dc9136ec4
74a7a5ae08
70591c147d
6c2464d032
6d83dddeb1
d20bacd0e5
dcb10ce353
a98ef25fc7
bfa5a1a7fa
946cb7b22b
170ab64736
5a164283ad
74209bac28
7c4027fa5e
fe95c23795
015a6f7f17
471dc6b1e3
1ec3a70f66
9003233377
3c06859f9f
4ba523740f
05919da49d
b53daca83f
15d0c7a3fb
f9af6b32ef
ee2e502736
e7295fd974
14df6fc1ea
@@ -2,3 +2,4 @@
 host=gerrit.fd.io
 port=29418
 project=vpp
+defaultbranch=stable/2310
@@ -6,6 +6,7 @@ Release notes
 .. toctree::
     :maxdepth: 2

+    v23.10
     v23.06
     v23.02
     v22.10.1
docs/aboutvpp/releasenotes/v23.10.rst (new file, 629 lines added)
File diff suppressed because it is too large.
@@ -82,13 +82,15 @@ def filelist_from_git_ls():


 def is_uncommitted_changes():
     """Returns true if there are uncommitted changes in the repo"""
-    git_status = "git status --porcelain -uno"
-    returncode = run(git_status.split(), stdout=PIPE, stderr=PIPE)
-    if returncode.returncode != 0:
-        sys.exit(returncode.returncode)
+    # Don't run this check in the Jenkins CI
+    if os.getenv("FDIOTOOLS_IMAGE") is None:
+        git_status = "git status --porcelain -uno"
+        returncode = run(git_status.split(), stdout=PIPE, stderr=PIPE)
+        if returncode.returncode != 0:
+            sys.exit(returncode.returncode)

-    if returncode.stdout:
-        return True
+        if returncode.stdout:
+            return True
     return False
@@ -672,50 +672,73 @@ cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   u32 thread_index = 0;
+  u16 i;
   vec_foreach_index (thread_index, cmt->per_thread_data)
     {
       cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
       cryptodev_cache_ring_t *ring = &cet->cache_ring;
       u16 head = ring->head;
       u16 tail = ring->tail;
-      u16 n_cached = ((head == tail) && (ring->frames[head].f == 0)) ?
-                       0 :
-                       ((head == tail) && (ring->frames[head].f != 0)) ?
-                       (CRYPTODEV_CACHE_QUEUE_MASK + 1) :
-                       (head > tail) ?
-                       (head - tail) :
-                       (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
+      u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) &
+                     CRYPTODEV_CACHE_QUEUE_MASK;

       u16 enq_head = ring->enq_head;
       u16 deq_tail = ring->deq_tail;
       u16 n_frames_inflight =
-        ((enq_head == deq_tail) && (ring->frames[enq_head].f == 0)) ?
+        (enq_head == deq_tail) ?
           0 :
-          ((enq_head == deq_tail) && (ring->frames[enq_head].f != 0)) ?
-          CRYPTODEV_CACHE_QUEUE_MASK + 1 :
-          (enq_head > deq_tail) ?
-          (enq_head - deq_tail) :
-          (CRYPTODEV_CACHE_QUEUE_MASK - deq_tail + enq_head);
-
+          ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) &
+           CRYPTODEV_CACHE_QUEUE_MASK);
       /* even if some elements of dequeued frame are still pending for deq
        * we consider the frame as processed */
       u16 n_frames_processed =
-        ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ? 0 :
-        ((tail == deq_tail) && (ring->frames[deq_tail].f != 0)) ? 1 :
-        (deq_tail > tail) ? (deq_tail - tail + 1) :
-        (CRYPTODEV_CACHE_QUEUE_MASK - tail + deq_tail - 1);
+        ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
+          0 :
+          ((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) &
+           CRYPTODEV_CACHE_QUEUE_MASK) +
+            1;
+      /* even if some elements of enqueued frame are still pending for enq
+       * we consider the frame as enqueued */
+      u16 n_frames_pending =
+        (head == enq_head) ? 0 :
+                             ((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) &
+                              CRYPTODEV_CACHE_QUEUE_MASK) -
+                               1;
+
+      u16 elts_to_enq =
+        (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head);
+      u16 elts_to_deq =
+        (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail);
+
+      u32 elts_total = 0;
+
+      for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++)
+        elts_total += ring->frames[i].n_elts;

       if (vlib_num_workers () > 0 && thread_index == 0)
         continue;

       vlib_cli_output (vm, "\n\n");
-      vlib_cli_output (vm, "Frames total: %u", n_cached);
-      vlib_cli_output (vm, "Frames pending in the ring: %u",
-                       n_cached - n_frames_inflight - n_frames_processed);
+      vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached);
+      vlib_cli_output (vm, "Frames cached but not processed: %u",
+                       n_frames_pending);
       vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
-      vlib_cli_output (vm, "Frames dequed but not returned: %u",
-                       n_frames_processed);
+      vlib_cli_output (vm, "Frames processed: %u", n_frames_processed);
+      vlib_cli_output (vm, "Elements total: %u", elts_total);
       vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
-      vlib_cli_output (vm, "Head: %u", head);
-      vlib_cli_output (vm, "Tail: %u", tail);
+      vlib_cli_output (vm, "Head index: %u", head);
+      vlib_cli_output (vm, "Tail index: %u", tail);
+      vlib_cli_output (vm, "Current frame index being enqueued: %u",
+                       enq_head);
+      vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail);
+      vlib_cli_output (vm,
+                       "Elements in current frame to be enqueued: %u, waiting "
+                       "to be enqueued: %u",
+                       ring->frames[enq_head].n_elts, elts_to_enq);
+      vlib_cli_output (vm,
+                       "Elements in current frame to be dequeued: %u, waiting "
+                       "to be dequeued: %u",
+                       ring->frames[deq_tail].n_elts, elts_to_deq);
       vlib_cli_output (vm, "\n\n");
     }
   return 0;
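All of the rewritten counters above lean on the same power-of-two ring identity: with CRYPTODEV_CACHE_QUEUE_SIZE a power of two and both indices kept inside the ring, occupancy is (SIZE - tail + head) & MASK. A minimal standalone sketch of that arithmetic; QUEUE_SIZE and QUEUE_MASK here are illustrative stand-ins for the plugin's constants, not its actual code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_SIZE 1024u /* must be a power of two */
    #define QUEUE_MASK (QUEUE_SIZE - 1)

    /* occupancy of a ring whose indices stay in [0, QUEUE_SIZE) */
    static uint16_t
    ring_occupancy (uint16_t head, uint16_t tail)
    {
      return (QUEUE_SIZE - tail + head) & QUEUE_MASK;
    }

    int
    main (void)
    {
      assert (ring_occupancy (0, 0) == 0);    /* empty */
      assert (ring_occupancy (10, 4) == 6);   /* no wrap */
      assert (ring_occupancy (3, 1020) == 7); /* wrapped: 4 + 3 */
      printf ("ok\n");
      return 0;
    }

Note that this formula maps a completely full ring and an empty ring to the same value, which is why the push path below keeps one slot in reserve.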
@@ -32,6 +32,7 @@
 #define CRYPTODEV_MAX_IV_SIZE  16
 #define CRYPTODEV_MAX_AAD_SIZE 16
 #define CRYPTODEV_MAX_N_SGL    8 /**< maximum number of segments */
+#define CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE 8

 #define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
 #define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
@@ -303,19 +304,24 @@ cryptodev_cache_ring_push (cryptodev_cache_ring_t *r,
                           vnet_crypto_async_frame_t *f)
 {
   u16 head = r->head;
+  u16 tail = r->tail;
+
   cryptodev_cache_ring_elt_t *ring_elt = &r->frames[head];
   /**
    * in debug mode we do the ring sanity test when a frame is enqueued to
    * the ring.
    **/
 #if CLIB_DEBUG > 0
-  u16 tail = r->tail;
   u16 n_cached = (head >= tail) ? (head - tail) :
                                   (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
-  ERROR_ASSERT (n_cached < VNET_CRYPTO_FRAME_POOL_SIZE);
+  ERROR_ASSERT (n_cached < CRYPTODEV_CACHE_QUEUE_SIZE);
   ERROR_ASSERT (r->raw == 0 && r->frames[head].raw == 0 &&
                 r->frames[head].f == 0);
 #endif
+  /* the ring capacity is CRYPTODEV_CACHE_QUEUE_SIZE - 1 */
+  if (PREDICT_FALSE (head + 1) == tail)
+    return 0;

   ring_elt->f = f;
   ring_elt->n_elts = f->n_elts;
   /* update head */
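A sketch of the producer-side guard that pairs with the occupancy formula. Because occupancy alone cannot distinguish full from empty, one slot is sacrificed and the push is refused when advancing head would land on tail; masking the incremented head keeps the guard valid at the wrap point. This is an illustrative stand-in, not the plugin's code, and unlike the hunk above it masks head + 1 before the compare:

    #include <stdbool.h>
    #include <stdint.h>

    #define QUEUE_SIZE 1024u /* power of two; usable capacity is QUEUE_SIZE - 1 */
    #define QUEUE_MASK (QUEUE_SIZE - 1)

    typedef struct
    {
      uint16_t head; /* producer index, kept in [0, QUEUE_SIZE) */
      uint16_t tail; /* consumer index, kept in [0, QUEUE_SIZE) */
    } ring_t;

    /* returns false when the ring is full: one slot is sacrificed so that
     * head == tail can only ever mean "empty" */
    static bool
    ring_try_push (ring_t *r)
    {
      uint16_t next_head = (r->head + 1) & QUEUE_MASK;
      if (next_head == r->tail)
        return false; /* full */
      r->head = next_head;
      return true;
    }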
@@ -148,6 +148,9 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
   cryptodev_cache_ring_elt_t *ring_elt =
     cryptodev_cache_ring_push (ring, frame);

+  if (PREDICT_FALSE (ring_elt == NULL))
+    return -1;
+
   ring_elt->aad_len = 1;
   ring_elt->op_type = (u8) op_type;
   return 0;
@@ -295,6 +298,10 @@ cryptodev_frame_aead_enqueue (vlib_main_t *vm,
   ERROR_ASSERT (frame->n_elts > 0);
   cryptodev_cache_ring_elt_t *ring_elt =
     cryptodev_cache_ring_push (ring, frame);

+  if (PREDICT_FALSE (ring_elt == NULL))
+    return -1;
+
   ring_elt->aad_len = aad_len;
   ring_elt->op_type = (u8) op_type;
   return 0;
@@ -462,7 +469,7 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   vnet_crypto_async_frame_t *frame = NULL;
   cryptodev_cache_ring_t *ring = &cet->cache_ring;
   u16 *const deq = &ring->deq_tail;
-  u16 n_deq, idx, left_to_deq, i;
+  u16 n_deq, left_to_deq;
   u16 max_to_deq = 0;
   u16 inflight = cet->inflight;
   u8 dequeue_more = 0;
@@ -472,29 +479,12 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   u32 n_elts, n;
   u64 err0 = 0, err1 = 0, err2 = 0, err3 = 0; /* partial errors mask */

-  idx = ring->deq_tail;
-
-  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
-    {
-      u32 frame_inflight =
-        CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, idx);
-
-      if (PREDICT_TRUE (frame_inflight > 0))
-        break;
-      idx++;
-      idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
-    }
-
-  ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
-  ring->deq_tail = idx;
-
   left_to_deq =
     ring->frames[*deq].f->n_elts - ring->frames[*deq].deq_elts_tail;
   max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

   /* deq field can be used to track frame that is currently dequeued
      based on that you can specify the amount of elements to deq for the frame */

   n_deq =
     rte_cryptodev_dequeue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                  (struct rte_crypto_op **) cops, max_to_deq);
@@ -547,9 +537,13 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   ring->frames[*deq].deq_elts_tail += n_deq;
   if (cryptodev_cache_ring_update_deq_tail (ring, deq))
     {
+      u32 fr_processed =
+        (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) &
+        CRYPTODEV_CACHE_QUEUE_MASK;
+
       *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
-      dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
+      dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE);
     }

   cet->inflight = inflight;
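The new dequeue_more condition throttles on ring state rather than on burst size: draining stops once the count of frames that are processed but not yet handed back to the crypto framework reaches CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE. A hedged sketch of that decision; the constants are stand-ins for the plugin's macros:

    #include <stdbool.h>
    #include <stdint.h>

    #define QUEUE_SIZE 1024u
    #define QUEUE_MASK (QUEUE_SIZE - 1)
    #define MAX_PROCESSED_IN_QUEUE 8u /* stand-in for CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE */

    /* decide whether to keep draining the device: stop once "processed but
     * not yet returned" frames reach the cap */
    static bool
    should_dequeue_more (uint16_t tail, uint16_t deq_tail)
    {
      uint32_t frames_processed = (QUEUE_SIZE - tail + deq_tail) & QUEUE_MASK;
      return frames_processed < MAX_PROCESSED_IN_QUEUE;
    }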
@@ -118,6 +118,9 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
   cryptodev_cache_ring_elt_t *ring_elt =
     cryptodev_cache_ring_push (ring, frame);

+  if (PREDICT_FALSE (ring_elt == NULL))
+    return -1;
+
   ring_elt->aad_len = 1;
   ring_elt->op_type = (u8) op_type;
   return 0;
@@ -272,6 +275,9 @@ cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
   cryptodev_cache_ring_elt_t *ring_elt =
     cryptodev_cache_ring_push (ring, frame);

+  if (PREDICT_FALSE (ring_elt == NULL))
+    return -1;
+
   ring_elt->aad_len = aad_len;
   ring_elt->op_type = (u8) op_type;
   return 0;
@@ -466,32 +472,17 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
   cryptodev_cache_ring_t *ring = &cet->cache_ring;
   u16 *const deq = &ring->deq_tail;
   u32 n_success;
-  u16 n_deq, indice, i, left_to_deq;
+  u16 n_deq, i, left_to_deq;
   u16 max_to_deq = 0;
   u16 inflight = cet->inflight;
   u8 dequeue_more = 0;
   int dequeue_status;

-  indice = *deq;
-
-  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
-    {
-      if (PREDICT_TRUE (
-            CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, indice) > 0))
-        break;
-      indice += 1;
-      indice &= CRYPTODEV_CACHE_QUEUE_MASK;
-    }
-
-  ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
-
-  *deq = indice;
-
   left_to_deq = ring->frames[*deq].n_elts - ring->frames[*deq].deq_elts_tail;
   max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

-  /* you can use deq field to track frame that is currently dequeued */
-  /* based on that you can specify the amount of elements to deq for the frame
-   */
+  /* deq field can be used to track frame that is currently dequeued */
+  /* based on that the amount of elements to deq for the frame can be
+     specified */

   n_deq = rte_cryptodev_raw_dequeue_burst (
@@ -516,9 +507,13 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,

   if (cryptodev_cache_ring_update_deq_tail (ring, deq))
     {
+      u32 fr_processed =
+        (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) &
+        CRYPTODEV_CACHE_QUEUE_MASK;
+
       *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
-      dequeue_more = max_to_deq < CRYPTODE_DEQ_MAX;
+      dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE);
     }

   int res =
@@ -555,24 +550,18 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   u8 dequeue_more = 1;

   while (cet->inflight > 0 && dequeue_more)
     {
       dequeue_more = cryptodev_raw_dequeue_internal (vm, nb_elts_processed,
                                                      enqueue_thread_idx);
     }

   if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
     cryptodev_enqueue_frame_to_qat (vm, &ring->frames[ring->enq_head]);

-  if (PREDICT_TRUE (ring_elt->f != 0))
+  if (PREDICT_TRUE (ring_elt->f != 0) &&
+      (ring_elt->n_elts == ring_elt->deq_elts_tail))
     {
-      if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
-        {
-          vlib_node_set_interrupt_pending (
-            vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
-          ret_frame = cryptodev_cache_ring_pop (ring);
-
-          return ret_frame;
-        }
       vlib_node_set_interrupt_pending (
        vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
       ret_frame = cryptodev_cache_ring_pop (ring);
     }

   return ret_frame;
@@ -245,6 +245,7 @@ flowprobe_template_rewrite_inline (ipfix_exporter_t *exp, flow_report_t *fr,
   flowprobe_main_t *fm = &flowprobe_main;
   flowprobe_record_t flags = fr->opaque.as_uword;
   bool collect_ip4 = false, collect_ip6 = false;
+  bool collect_l4 = false;

   stream = &exp->streams[fr->stream_index];

@@ -257,6 +258,10 @@ flowprobe_template_rewrite_inline (ipfix_exporter_t *exp, flow_report_t *fr,
       if (which == FLOW_VARIANT_L2_IP6)
        flags |= FLOW_RECORD_L2_IP6;
     }
+  if (flags & FLOW_RECORD_L4)
+    {
+      collect_l4 = (which != FLOW_VARIANT_L2);
+    }

   field_count += flowprobe_template_common_field_count ();
   if (flags & FLOW_RECORD_L2)
@@ -265,7 +270,7 @@ flowprobe_template_rewrite_inline (ipfix_exporter_t *exp, flow_report_t *fr,
     field_count += flowprobe_template_ip4_field_count ();
   if (collect_ip6)
     field_count += flowprobe_template_ip6_field_count ();
-  if (flags & FLOW_RECORD_L4)
+  if (collect_l4)
     field_count += flowprobe_template_l4_field_count ();

   /* allocate rewrite space */
@@ -304,7 +309,7 @@ flowprobe_template_rewrite_inline (ipfix_exporter_t *exp, flow_report_t *fr,
     f = flowprobe_template_ip4_fields (f);
   if (collect_ip6)
     f = flowprobe_template_ip6_fields (f);
-  if (flags & FLOW_RECORD_L4)
+  if (collect_l4)
     f = flowprobe_template_l4_fields (f);

   /* Back to the template packet... */
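The template hunks above replace raw FLOW_RECORD_L4 tests with a precomputed collect_l4 so that pure-L2 templates never emit L4 fields even when the L4 record flag is set. A standalone sketch of the gating; the enum values are illustrative, not the plugin's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative stand-ins for the plugin's record flags and variants */
    enum { RECORD_L2 = 1 << 0, RECORD_L3 = 1 << 1, RECORD_L4 = 1 << 2 };
    typedef enum { VARIANT_IP4, VARIANT_IP6, VARIANT_L2, VARIANT_L2_IP4 } variant_t;

    static bool
    collect_l4 (unsigned flags, variant_t which)
    {
      /* L4 fields are requested AND the variant actually carries an L4 header */
      return (flags & RECORD_L4) && which != VARIANT_L2;
    }

    int
    main (void)
    {
      printf ("%d\n", collect_l4 (RECORD_L4, VARIANT_L2));  /* 0: suppressed */
      printf ("%d\n", collect_l4 (RECORD_L4, VARIANT_IP4)); /* 1: collected */
      return 0;
    }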
@@ -503,6 +508,43 @@ flowprobe_create_state_tables (u32 active_timer)
   return error;
 }

+static clib_error_t *
+flowprobe_clear_state_if_index (u32 sw_if_index)
+{
+  flowprobe_main_t *fm = &flowprobe_main;
+  clib_error_t *error = 0;
+  u32 worker_i;
+  u32 entry_i;
+
+  if (fm->active_timer > 0)
+    {
+      vec_foreach_index (worker_i, fm->pool_per_worker)
+        {
+          pool_foreach_index (entry_i, fm->pool_per_worker[worker_i])
+            {
+              flowprobe_entry_t *e =
+                pool_elt_at_index (fm->pool_per_worker[worker_i], entry_i);
+              if (e->key.rx_sw_if_index == sw_if_index ||
+                  e->key.tx_sw_if_index == sw_if_index)
+                {
+                  e->packetcount = 0;
+                  e->octetcount = 0;
+                  e->prot.tcp.flags = 0;
+                  if (fm->passive_timer > 0)
+                    {
+                      tw_timer_stop_2t_1w_2048sl (
+                        fm->timers_per_worker[worker_i],
+                        e->passive_timer_handle);
+                    }
+                  flowprobe_delete_by_index (worker_i, entry_i);
+                }
+            }
+        }
+    }
+
+  return error;
+}
+
 static int
 validate_feature_on_interface (flowprobe_main_t * fm, u32 sw_if_index,
                                u8 which)
@@ -548,12 +590,17 @@ flowprobe_interface_add_del_feature (flowprobe_main_t *fm, u32 sw_if_index,
 {
   if (which == FLOW_VARIANT_L2)
     {
+      if (!is_add)
+        {
+          flowprobe_flush_callback_l2 ();
+        }
       if (fm->record & FLOW_RECORD_L2)
        {
          rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
                                           flowprobe_data_callback_l2,
                                           flowprobe_template_rewrite_l2,
                                           is_add, &template_id);
+         fm->template_reports[flags] = (is_add) ? template_id : 0;
        }
       if (fm->record & FLOW_RECORD_L3 || fm->record & FLOW_RECORD_L4)
        {
@@ -576,20 +623,30 @@ flowprobe_interface_add_del_feature (flowprobe_main_t *fm, u32 sw_if_index,
            flags | FLOW_RECORD_L2_IP4;
          fm->context[FLOW_VARIANT_L2_IP6].flags =
            flags | FLOW_RECORD_L2_IP6;
+
+         fm->template_reports[flags] = template_id;
        }
     }
   else if (which == FLOW_VARIANT_IP4)
-    rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
-                                     flowprobe_data_callback_ip4,
-                                     flowprobe_template_rewrite_ip4,
-                                     is_add, &template_id);
+    {
+      if (!is_add)
+        {
+          flowprobe_flush_callback_ip4 ();
+        }
+      rv = flowprobe_template_add_del (
+       1, UDP_DST_PORT_ipfix, flags, flowprobe_data_callback_ip4,
+       flowprobe_template_rewrite_ip4, is_add, &template_id);
+      fm->template_reports[flags] = (is_add) ? template_id : 0;
+    }
   else if (which == FLOW_VARIANT_IP6)
-    rv = flowprobe_template_add_del (1, UDP_DST_PORT_ipfix, flags,
-                                     flowprobe_data_callback_ip6,
-                                     flowprobe_template_rewrite_ip6,
-                                     is_add, &template_id);
+    {
+      if (!is_add)
+        {
+          flowprobe_flush_callback_ip6 ();
+        }
+      rv = flowprobe_template_add_del (
+       1, UDP_DST_PORT_ipfix, flags, flowprobe_data_callback_ip6,
+       flowprobe_template_rewrite_ip6, is_add, &template_id);
+      fm->template_reports[flags] = (is_add) ? template_id : 0;
+    }

   if (rv && rv != VNET_API_ERROR_VALUE_EXIST)
     {
@@ -600,7 +657,6 @@ flowprobe_interface_add_del_feature (flowprobe_main_t *fm, u32 sw_if_index,
   if (which != (u8) ~ 0)
     {
       fm->context[which].flags = fm->record;
-      fm->template_reports[flags] = (is_add) ? template_id : 0;
     }

   if (direction == FLOW_DIRECTION_RX || direction == FLOW_DIRECTION_BOTH)
@@ -645,6 +701,11 @@ flowprobe_interface_add_del_feature (flowprobe_main_t *fm, u32 sw_if_index,
       vlib_process_signal_event (vm, flowprobe_timer_node.index, 1, 0);
     }

+  if (!is_add && fm->initialized)
+    {
+      flowprobe_clear_state_if_index (sw_if_index);
+    }
+
   return 0;
 }
@@ -168,6 +168,8 @@ typedef struct
 extern flowprobe_main_t flowprobe_main;
 extern vlib_node_registration_t flowprobe_walker_node;

+void flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex);
+
 void flowprobe_flush_callback_ip4 (void);
 void flowprobe_flush_callback_ip6 (void);
 void flowprobe_flush_callback_l2 (void);
@@ -384,9 +384,11 @@ add_to_flow_record_state (vlib_main_t *vm, vlib_node_runtime_t *node,
   flowprobe_record_t flags = fm->context[which].flags;
   bool collect_ip4 = false, collect_ip6 = false;
   ASSERT (b);
-  ethernet_header_t *eth = ethernet_buffer_get_header (b);
+  ethernet_header_t *eth = (direction == FLOW_DIRECTION_TX) ?
+                             vlib_buffer_get_current (b) :
+                             ethernet_buffer_get_header (b);
   u16 ethertype = clib_net_to_host_u16 (eth->type);
   u16 l2_hdr_sz = sizeof (ethernet_header_t);
+  i16 l3_hdr_offset = (u8 *) eth - b->data + sizeof (ethernet_header_t);
   /* *INDENT-OFF* */
   flowprobe_key_t k = {};
   /* *INDENT-ON* */
@@ -423,13 +425,13 @@ add_to_flow_record_state (vlib_main_t *vm, vlib_node_runtime_t *node,
       while (clib_net_to_host_u16 (ethv->type) == ETHERNET_TYPE_VLAN)
        {
          ethv++;
          l2_hdr_sz += sizeof (ethernet_vlan_header_tv_t);
+         l3_hdr_offset += sizeof (ethernet_vlan_header_tv_t);
        }
       k.ethertype = ethertype = clib_net_to_host_u16 ((ethv)->type);
     }
   if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
     {
-      ip6 = (ip6_header_t *) (b->data + l2_hdr_sz);
+      ip6 = (ip6_header_t *) (b->data + l3_hdr_offset);
       if (flags & FLOW_RECORD_L3)
        {
          k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
@@ -448,7 +450,7 @@ add_to_flow_record_state (vlib_main_t *vm, vlib_node_runtime_t *node,
     }
   if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
     {
-      ip4 = (ip4_header_t *) (b->data + l2_hdr_sz);
+      ip4 = (ip4_header_t *) (b->data + l3_hdr_offset);
       if (flags & FLOW_RECORD_L3)
        {
          k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
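The add_to_flow_record_state hunks separate "size of the L2 header" from "offset of the L3 header within buffer data", which differ whenever the Ethernet header does not start at b->data (the TX path) or 802.1Q tags are present. A simplified sketch of the offset arithmetic; the struct layouts are illustrative, not VPP's:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint8_t dst[6], src[6]; uint16_t type; } eth_header_t;
    typedef struct { uint16_t priority_cfi_and_id; uint16_t type; } vlan_header_t;

    /* offset of the L3 header from 'data', given where the Ethernet header
     * actually starts and how many VLAN tags follow it */
    static ptrdiff_t
    l3_header_offset (const uint8_t *data, const eth_header_t *eth,
                      int n_vlan_tags)
    {
      return ((const uint8_t *) eth - data) + sizeof (eth_header_t) +
             n_vlan_tags * (ptrdiff_t) sizeof (vlan_header_t);
    }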
@@ -701,6 +703,7 @@ flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
   ipfix_exporter_t *exp = pool_elt_at_index (flow_report_main.exporters, 0);
   vlib_buffer_t *b0;
   bool collect_ip4 = false, collect_ip6 = false;
+  bool collect_l4 = false;
   flowprobe_variant_t which = e->key.which;
   flowprobe_record_t flags = fm->context[which].flags;
   u16 offset =
@@ -719,6 +722,10 @@ flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
       collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
       collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
     }
+  if (flags & FLOW_RECORD_L4)
+    {
+      collect_l4 = (which != FLOW_VARIANT_L2);
+    }

   offset += flowprobe_common_add (b0, e, offset);

@@ -728,13 +735,14 @@ flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
     offset += flowprobe_l3_ip6_add (b0, e, offset);
   if (collect_ip4)
     offset += flowprobe_l3_ip4_add (b0, e, offset);
-  if (flags & FLOW_RECORD_L4)
+  if (collect_l4)
     offset += flowprobe_l4_add (b0, e, offset);

   /* Reset per flow-export counters */
   e->packetcount = 0;
   e->octetcount = 0;
   e->last_exported = vlib_time_now (vm);
+  e->prot.tcp.flags = 0;

   b0->current_length = offset;

@@ -955,8 +963,7 @@ flowprobe_flush_callback_l2 (void)
   flush_record (FLOW_VARIANT_L2_IP6);
 }

-
-static void
+void
 flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex)
 {
   flowprobe_main_t *fm = &flowprobe_main;
@@ -100,6 +100,8 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
   memif_region_t *mr;
   memif_queue_t *mq;
   int i;
+  vlib_main_t *vm = vlib_get_main ();
+  int with_barrier = 0;

   if (mif == 0)
     return;
@@ -141,6 +143,12 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
       clib_mem_free (mif->sock);
     }

+  if (vlib_worker_thread_barrier_held () == 0)
+    {
+      with_barrier = 1;
+      vlib_worker_thread_barrier_sync (vm);
+    }
+
   /* *INDENT-OFF* */
   vec_foreach_index (i, mif->rx_queues)
     {
@@ -198,6 +206,9 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
   vec_free (mif->remote_name);
   vec_free (mif->remote_if_name);
   clib_fifo_free (mif->msg_queue);
+
+  if (with_barrier)
+    vlib_worker_thread_barrier_release (vm);
 }

 static clib_error_t *
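The memif hunks wrap queue teardown in a conditional worker barrier: sync only when the caller does not already hold the barrier, and release only if it was taken here, so the function is safe from both barrier-holding and barrier-free call paths. A standalone sketch of the idiom; the barrier_* helpers are stubs standing in for VPP's vlib_worker_thread_barrier_* functions:

    #include <stdbool.h>

    /* illustrative stubs, not VPP's implementations */
    static bool barrier_is_held;
    static bool barrier_held (void) { return barrier_is_held; }
    static void barrier_sync (void) { barrier_is_held = true; }
    static void barrier_release (void) { barrier_is_held = false; }

    static void
    teardown_shared_state (void)
    {
      int with_barrier = 0;

      /* take the barrier only if the caller does not already hold it */
      if (!barrier_held ())
        {
          with_barrier = 1;
          barrier_sync ();
        }

      /* ... free rings and queues that worker threads may be polling ... */

      /* release only if this function acquired the barrier */
      if (with_barrier)
        barrier_release ();
    }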
@@ -72,7 +72,7 @@ openssl_ctx_free (tls_ctx_t * ctx)

   SSL_free (oc->ssl);
   vec_free (ctx->srv_hostname);
-  SSL_CTX_free (oc->ssl_ctx);
+  SSL_CTX_free (oc->client_ssl_ctx);
 #ifdef HAVE_OPENSSL_ASYNC
   openssl_evt_free (ctx->evt_index, ctx->c_thread_index);
 #endif
@@ -163,7 +163,7 @@ openssl_lctx_get (u32 lctx_index)
     return -1;

 static int
-openssl_read_from_ssl_into_fifo (svm_fifo_t * f, SSL * ssl)
+openssl_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl, u32 max_len)
 {
   int read, rv, n_fs, i;
   const int n_segs = 2;
@@ -174,6 +174,7 @@ openssl_read_from_ssl_into_fifo (svm_fifo_t * f, SSL * ssl)
   if (!max_enq)
     return 0;

+  max_enq = clib_min (max_len, max_enq);
   n_fs = svm_fifo_provision_chunks (f, fs, n_segs, max_enq);
   if (n_fs < 0)
     return 0;
@@ -533,9 +534,10 @@ static inline int
 openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session)
 {
   openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+  const u32 max_len = 128 << 10;
   session_t *app_session;
-  int read;
   svm_fifo_t *f;
+  int read;

   if (PREDICT_FALSE (SSL_in_init (oc->ssl)))
     {
@@ -549,7 +551,7 @@ openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session)
   app_session = session_get_from_handle (ctx->app_session_handle);
   f = app_session->rx_fifo;

-  read = openssl_read_from_ssl_into_fifo (f, oc->ssl);
+  read = openssl_read_from_ssl_into_fifo (f, oc->ssl, max_len);

   /* Unrecoverable protocol error. Reset connection */
   if (PREDICT_FALSE (read < 0))
@@ -558,8 +560,7 @@ openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session)
       return 0;
     }

-  /* If handshake just completed, session may still be in accepting state */
-  if (read && app_session->session_state >= SESSION_STATE_READY)
+  if (read)
     tls_notify_app_enqueue (ctx, app_session);

   if ((SSL_pending (oc->ssl) > 0) ||
@@ -738,30 +739,31 @@ openssl_ctx_init_client (tls_ctx_t * ctx)
       return -1;
     }

-  oc->ssl_ctx = SSL_CTX_new (method);
-  if (oc->ssl_ctx == NULL)
+  oc->client_ssl_ctx = SSL_CTX_new (method);
+  if (oc->client_ssl_ctx == NULL)
     {
       TLS_DBG (1, "SSL_CTX_new returned null");
       return -1;
     }

-  SSL_CTX_set_ecdh_auto (oc->ssl_ctx, 1);
-  SSL_CTX_set_mode (oc->ssl_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
+  SSL_CTX_set_ecdh_auto (oc->client_ssl_ctx, 1);
+  SSL_CTX_set_mode (oc->client_ssl_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
 #ifdef HAVE_OPENSSL_ASYNC
   if (om->async)
-    SSL_CTX_set_mode (oc->ssl_ctx, SSL_MODE_ASYNC);
+    SSL_CTX_set_mode (oc->client_ssl_ctx, SSL_MODE_ASYNC);
 #endif
-  rv = SSL_CTX_set_cipher_list (oc->ssl_ctx, (const char *) om->ciphers);
+  rv =
+    SSL_CTX_set_cipher_list (oc->client_ssl_ctx, (const char *) om->ciphers);
   if (rv != 1)
     {
       TLS_DBG (1, "Couldn't set cipher");
       return -1;
     }

-  SSL_CTX_set_options (oc->ssl_ctx, flags);
-  SSL_CTX_set_cert_store (oc->ssl_ctx, om->cert_store);
+  SSL_CTX_set_options (oc->client_ssl_ctx, flags);
+  SSL_CTX_set1_cert_store (oc->client_ssl_ctx, om->cert_store);

-  oc->ssl = SSL_new (oc->ssl_ctx);
+  oc->ssl = SSL_new (oc->client_ssl_ctx);
   if (oc->ssl == NULL)
     {
       TLS_DBG (1, "Couldn't initialize ssl struct");
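The switch from SSL_CTX_set_cert_store to SSL_CTX_set1_cert_store matters because the certificate store (om->cert_store) is shared: the set1 variant takes its own reference on the X509_STORE, so freeing one SSL_CTX no longer tears down a store other contexts still use. A hedged sketch of the sharing pattern, using the standard OpenSSL API but not the VPP code itself:

    #include <openssl/ssl.h>

    /* Sketch: share one trust store across two client contexts. With
     * SSL_CTX_set1_cert_store() each context holds its own reference, so
     * freeing one context does not pull the store out from under the other. */
    int
    share_store (X509_STORE *store)
    {
      SSL_CTX *a = SSL_CTX_new (TLS_client_method ());
      SSL_CTX *b = SSL_CTX_new (TLS_client_method ());
      if (!a || !b)
        {
          SSL_CTX_free (a); /* SSL_CTX_free (NULL) is a no-op */
          SSL_CTX_free (b);
          return -1;
        }

      SSL_CTX_set1_cert_store (a, store); /* store refcount +1 */
      SSL_CTX_set1_cert_store (b, store); /* store refcount +1 */

      SSL_CTX_free (a); /* releases a's reference only */
      SSL_CTX_free (b); /* releases b's reference only */
      return 0;
    }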
@@ -33,7 +33,7 @@ typedef struct tls_ctx_openssl_
 {
   tls_ctx_t ctx; /**< First */
   u32 openssl_ctx_index;
-  SSL_CTX *ssl_ctx;
+  SSL_CTX *client_ssl_ctx;
   SSL *ssl;
   BIO *rbio;
   BIO *wbio;
@@ -445,7 +445,7 @@ picotls_ctx_read (tls_ctx_t *ctx, session_t *tcp_session)
   app_session = session_get_from_handle (ctx->app_session_handle);
   wrote = ptls_tcp_to_app_write (ptls_ctx, app_session->rx_fifo, tcp_rx_fifo);

-  if (wrote && app_session->session_state >= SESSION_STATE_READY)
+  if (wrote)
     tls_notify_app_enqueue (ctx, app_session);

   if (ptls_ctx->read_buffer_offset || svm_fifo_max_dequeue (tcp_rx_fifo))
@@ -483,7 +483,7 @@ bfd_transport_udp6 (vlib_main_t *vm, vlib_node_runtime_t *rt, u32 bi,
                            is_echo ? &bm->tx_echo_counter :
                                      &bm->tx_counter);
     }
-  return 1;
+  return rv;
 }

 static bfd_session_t *
@@ -303,8 +303,17 @@ ethernet_mac_change (vnet_hw_interface_t * hi,

   {
     ethernet_address_change_ctx_t *cb;
+    u32 id, sw_if_index;
     vec_foreach (cb, em->address_change_callbacks)
-      cb->function (em, hi->sw_if_index, cb->function_opaque);
+      {
+       cb->function (em, hi->sw_if_index, cb->function_opaque);
+       /* clang-format off */
+       hash_foreach (id, sw_if_index, hi->sub_interface_sw_if_index_by_id,
+       ({
+         cb->function (em, sw_if_index, cb->function_opaque);
+       }));
+       /* clang-format on */
+      }
   }

   return (NULL);
@@ -153,10 +153,14 @@ typedef enum fib_entry_src_attribute_t_ {
     * the source is inherited from its cover
     */
    FIB_ENTRY_SRC_ATTRIBUTE_INHERITED,
+    /**
+     * the source is currently used as glean src address
+     */
+    FIB_ENTRY_SRC_ATTRIBUTE_PROVIDES_GLEAN,
    /**
     * Marker. add new entries before this one.
     */
-    FIB_ENTRY_SRC_ATTRIBUTE_LAST = FIB_ENTRY_SRC_ATTRIBUTE_INHERITED,
+    FIB_ENTRY_SRC_ATTRIBUTE_LAST = FIB_ENTRY_SRC_ATTRIBUTE_PROVIDES_GLEAN,
 } fib_entry_src_attribute_t;

@@ -166,6 +170,7 @@ typedef enum fib_entry_src_attribute_t_ {
    [FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE] = "active",                \
    [FIB_ENTRY_SRC_ATTRIBUTE_STALE] = "stale",                  \
    [FIB_ENTRY_SRC_ATTRIBUTE_INHERITED] = "inherited",          \
+    [FIB_ENTRY_SRC_ATTRIBUTE_PROVIDES_GLEAN] = "provides-glean", \
 }

 #define FOR_EACH_FIB_SRC_ATTRIBUTE(_item)
@@ -180,6 +185,7 @@ typedef enum fib_entry_src_flag_t_ {
    FIB_ENTRY_SRC_FLAG_ACTIVE = (1 << FIB_ENTRY_SRC_ATTRIBUTE_ACTIVE),
    FIB_ENTRY_SRC_FLAG_STALE = (1 << FIB_ENTRY_SRC_ATTRIBUTE_STALE),
    FIB_ENTRY_SRC_FLAG_INHERITED = (1 << FIB_ENTRY_SRC_ATTRIBUTE_INHERITED),
+    FIB_ENTRY_SRC_FLAG_PROVIDES_GLEAN = (1 << FIB_ENTRY_SRC_ATTRIBUTE_PROVIDES_GLEAN),
 } __attribute__ ((packed)) fib_entry_src_flag_t;

 extern u8 * format_fib_entry_src_flags(u8 *s, va_list *args);
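The fib_entry.h hunks keep three parallel constructs in sync: the attribute enum, the designated-initializer name table, and flag bits derived as 1 << attribute. A compact standalone sketch of that pattern; the names are illustrative, not VPP's:

    #include <stdio.h>

    typedef enum
    {
      SRC_ATTRIBUTE_ACTIVE,
      SRC_ATTRIBUTE_STALE,
      SRC_ATTRIBUTE_PROVIDES_GLEAN,
      SRC_ATTRIBUTE_LAST = SRC_ATTRIBUTE_PROVIDES_GLEAN, /* marker */
    } src_attribute_t;

    /* name table indexed by attribute, kept in sync by designated initializers */
    static const char *src_attribute_names[] = {
      [SRC_ATTRIBUTE_ACTIVE] = "active",
      [SRC_ATTRIBUTE_STALE] = "stale",
      [SRC_ATTRIBUTE_PROVIDES_GLEAN] = "provides-glean",
    };

    /* one flag bit per attribute */
    typedef enum
    {
      SRC_FLAG_ACTIVE = (1 << SRC_ATTRIBUTE_ACTIVE),
      SRC_FLAG_STALE = (1 << SRC_ATTRIBUTE_STALE),
      SRC_FLAG_PROVIDES_GLEAN = (1 << SRC_ATTRIBUTE_PROVIDES_GLEAN),
    } src_flag_t;

    int
    main (void)
    {
      unsigned flags = SRC_FLAG_ACTIVE | SRC_FLAG_PROVIDES_GLEAN;
      for (int a = 0; a <= SRC_ATTRIBUTE_LAST; a++)
        if (flags & (1 << a))
          printf ("%s\n", src_attribute_names[a]);
      return 0;
    }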
@@ -87,8 +87,16 @@ fib_entry_src_interface_update_glean (fib_entry_t *cover,
        if (fib_prefix_is_cover(&adj->sub_type.glean.rx_pfx,
                                &local->fe_prefix))
        {
-           adj->sub_type.glean.rx_pfx.fp_addr = local->fe_prefix.fp_addr;
-           return (1);
+           fib_entry_src_t *local_src;
+
+           local_src = fib_entry_src_find (local, FIB_SOURCE_INTERFACE);
+           if (local_src != NULL)
+           {
+               adj->sub_type.glean.rx_pfx.fp_addr =
+                   local->fe_prefix.fp_addr;
+               local_src->fes_flags |= FIB_ENTRY_SRC_FLAG_PROVIDES_GLEAN;
+               return (1);
+           }
        }
    }
 }
@@ -116,6 +124,52 @@ fib_entry_src_interface_path_swap (fib_entry_src_t *src,
    src->fes_pl = fib_path_list_create(pl_flags, paths);
 }

+typedef struct fesi_find_glean_ctx_t_ {
+    fib_node_index_t glean_node_index;
+} fesi_find_glean_ctx_t;
+
+static walk_rc_t
+fib_entry_src_interface_find_glean_walk (fib_entry_t *cover,
+                                         fib_node_index_t covered,
+                                         void *ctx)
+{
+    fesi_find_glean_ctx_t *find_glean_ctx = ctx;
+    fib_entry_t *covered_entry;
+    fib_entry_src_t *covered_src;
+
+    covered_entry = fib_entry_get (covered);
+    covered_src = fib_entry_src_find (covered_entry, FIB_SOURCE_INTERFACE);
+    if ((covered_src != NULL) &&
+       (covered_src->fes_flags & FIB_ENTRY_SRC_FLAG_PROVIDES_GLEAN))
+    {
+       find_glean_ctx->glean_node_index = covered;
+       return WALK_STOP;
+    }
+
+    return WALK_CONTINUE;
+}
+
+static fib_entry_t *
+fib_entry_src_interface_find_glean (fib_entry_t *cover)
+{
+    fib_entry_src_t *src;
+
+    src = fib_entry_src_find (cover, FIB_SOURCE_INTERFACE);
+    if (src == NULL)
+       /* the cover is not an interface source */
+       return NULL;
+
+    fesi_find_glean_ctx_t ctx = {
+       .glean_node_index = ~0,
+    };
+
+    fib_entry_cover_walk (cover, fib_entry_src_interface_find_glean_walk,
+                         &ctx);
+
+    return (ctx.glean_node_index == ~0) ? NULL :
+                                         fib_entry_get (ctx.glean_node_index);
+}
+
 /*
  * Source activate.
  * Called when the source is the new best source on the entry
@@ -128,6 +182,8 @@ fib_entry_src_interface_activate (fib_entry_src_t *src,

    if (FIB_ENTRY_FLAG_LOCAL & src->fes_entry_flags)
    {
+       u8 update_glean;
+
       /*
        * Track the covering attached/connected cover. This is so that
        * during an attached export of the cover, this local prefix is
@@ -141,10 +197,17 @@ fib_entry_src_interface_activate (fib_entry_src_t *src,

       cover = fib_entry_get(src->u.interface.fesi_cover);

+      /*
+       * Before adding as a child of the cover, check whether an existing
+       * child has already been used to populate the glean adjacency. If so,
+       * we don't need to update the adjacency.
+       */
+      update_glean = (fib_entry_src_interface_find_glean (cover) == NULL);
       src->u.interface.fesi_sibling =
          fib_entry_cover_track(cover, fib_entry_get_index(fib_entry));

-      fib_entry_src_interface_update_glean(cover, fib_entry);
+      if (update_glean)
+         fib_entry_src_interface_update_glean(cover, fib_entry);
    }

    return (!0);
@@ -167,15 +230,19 @@ fib_entry_src_interface_deactivate (fib_entry_src_t *src,
    if (FIB_NODE_INDEX_INVALID != src->u.interface.fesi_cover)
    {
       cover = fib_entry_get(src->u.interface.fesi_cover);

       fib_entry_cover_untrack(cover, src->u.interface.fesi_sibling);

       src->u.interface.fesi_cover = FIB_NODE_INDEX_INVALID;
       src->u.interface.fesi_sibling = ~0;

-      fib_entry_cover_walk(cover,
-                           fib_entry_src_interface_update_glean_walk,
-                           NULL);
+      /* If this was the glean address, find a new one */
+      if (src->fes_flags & FIB_ENTRY_SRC_FLAG_PROVIDES_GLEAN)
+      {
+         fib_entry_cover_walk(cover,
+                              fib_entry_src_interface_update_glean_walk,
+                              NULL);
+         src->fes_flags &= ~FIB_ENTRY_SRC_FLAG_PROVIDES_GLEAN;
+      }
    }
 }
@@ -1365,7 +1365,8 @@ fib_path_create (fib_node_index_t pl_index,
       dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
    }
    else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
-            (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
+            (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH) ||
+            (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP))
    {
       path->fp_type = FIB_PATH_TYPE_SPECIAL;
    }
@@ -534,7 +534,11 @@ fib_table_route_path_fixup (const fib_prefix_t *prefix,
    else if (fib_route_path_is_attached(path))
    {
       path->frp_flags |= FIB_ROUTE_PATH_GLEAN;
-      fib_prefix_normalize(prefix, &path->frp_connected);
+      /*
+       * attached prefixes are not suitable as the source of ARP requests
+       * so don't save the prefix in the glean adj
+       */
+      clib_memset(&path->frp_connected, 0, sizeof(path->frp_connected));
    }
    if (*eflags & FIB_ENTRY_FLAG_DROP)
    {
@@ -187,12 +187,16 @@ ip4_arp_inline (vlib_main_t * vm,
             /* resolve the packet's destination */
             ip4_header_t *ip0 = vlib_buffer_get_current (p0);
             resolve0 = ip0->dst_address;
+            src0 = adj0->sub_type.glean.rx_pfx.fp_addr.ip4;
           }
         else
-          /* resolve the incomplete adj */
-          resolve0 = adj0->sub_type.nbr.next_hop.ip4;
-
-        if (is_glean && adj0->sub_type.glean.rx_pfx.fp_len)
-          /* the glean is for a connected, local prefix */
-          src0 = adj0->sub_type.glean.rx_pfx.fp_addr.ip4;
-        else
           {
+            /* resolve the incomplete adj */
+            resolve0 = adj0->sub_type.nbr.next_hop.ip4;
             /* Src IP address in ARP header. */
             if (!fib_sas4_get (sw_if_index0, &resolve0, &src0) &&
                 !ip4_sas_by_sw_if_index (sw_if_index0, &resolve0, &src0))
@@ -690,6 +690,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
             current_sa_packets = current_sa_bytes = 0;

             sa0 = ipsec_sa_get (sa_index0);
+            current_sa_index = sa_index0;

             if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
                                 sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
@@ -701,7 +702,6 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 sa_index0);
                 goto trace;
               }
-            current_sa_index = sa_index0;
             vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index);
@@ -596,7 +596,7 @@ session_program_io_event (app_worker_t *app_wrk, session_t *s,
         /* Special events for connectionless sessions */
         et += SESSION_IO_EVT_BUILTIN_RX - SESSION_IO_EVT_RX;

-        ASSERT (s->thread_index == 0);
+        ASSERT (s->thread_index == 0 || et == SESSION_IO_EVT_TX_MAIN);
         session_event_t evt = {
           .event_type = et,
           .session_handle = session_handle (s),
@@ -77,10 +77,12 @@ app_worker_flush_events_inline (app_worker_t *app_wrk, u32 thread_index,
 {
   application_t *app = application_get (app_wrk->app_index);
   svm_msg_q_t *mq = app_wrk->event_queue;
-  u8 ring_index, mq_is_cong;
+  session_state_t old_state;
   session_event_t *evt;
   u32 n_evts = 128, i;
+  u8 ring_index, mq_is_cong;
   session_t *s;
+  int rv;

   n_evts = clib_min (n_evts, clib_fifo_elts (app_wrk->wrk_evts[thread_index]));

@@ -111,16 +113,18 @@ app_worker_flush_events_inline (app_worker_t *app_wrk, u32 thread_index,
       {
       case SESSION_IO_EVT_RX:
         s = session_get (evt->session_index, thread_index);
-        s->flags &= ~SESSION_F_RX_EVT;
         /* Application didn't confirm accept yet */
-        if (PREDICT_FALSE (s->session_state == SESSION_STATE_ACCEPTING))
+        if (PREDICT_FALSE (s->session_state == SESSION_STATE_ACCEPTING ||
+                           s->session_state == SESSION_STATE_CONNECTING))
           break;
+        s->flags &= ~SESSION_F_RX_EVT;
         app->cb_fns.builtin_app_rx_callback (s);
         break;
       /* Handle sessions that might not be on current thread */
       case SESSION_IO_EVT_BUILTIN_RX:
         s = session_get_from_handle_if_valid (evt->session_handle);
-        if (!s || s->session_state == SESSION_STATE_ACCEPTING)
+        if (!s || s->session_state == SESSION_STATE_ACCEPTING ||
+            s->session_state == SESSION_STATE_CONNECTING)
           break;
         s->flags &= ~SESSION_F_RX_EVT;
         app->cb_fns.builtin_app_rx_callback (s);
@@ -145,16 +149,46 @@ app_worker_flush_events_inline (app_worker_t *app_wrk, u32 thread_index,
         break;
       case SESSION_CTRL_EVT_ACCEPTED:
         s = session_get (evt->session_index, thread_index);
-        app->cb_fns.session_accept_callback (s);
+        old_state = s->session_state;
+        if (app->cb_fns.session_accept_callback (s))
+          {
+            session_close (s);
+            s->app_wrk_index = SESSION_INVALID_INDEX;
+            break;
+          }
+        if (is_builtin)
+          {
+            if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
+              {
+                session_set_state (s, old_state);
+                app_worker_close_notify (app_wrk, s);
+              }
+          }
         break;
       case SESSION_CTRL_EVT_CONNECTED:
         if (!(evt->as_u64[1] & 0xffffffff))
-          s = session_get (evt->session_index, thread_index);
+          {
+            s = session_get (evt->session_index, thread_index);
+            old_state = s->session_state;
+          }
         else
           s = 0;
-        app->cb_fns.session_connected_callback (app_wrk->wrk_index,
-                                                evt->as_u64[1] >> 32, s,
-                                                evt->as_u64[1] & 0xffffffff);
+        rv = app->cb_fns.session_connected_callback (
+          app_wrk->wrk_index, evt->as_u64[1] >> 32, s,
+          evt->as_u64[1] & 0xffffffff);
+        if (!s)
+          break;
+        if (rv)
+          {
+            session_close (s);
+            s->app_wrk_index = SESSION_INVALID_INDEX;
+            break;
+          }
+        if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
+          {
+            session_set_state (s, old_state);
+            app_worker_close_notify (app_wrk, s);
+          }
         break;
       case SESSION_CTRL_EVT_DISCONNECTED:
         s = session_get (evt->session_index, thread_index);
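The accepted and connected paths above now share one shape: snapshot the session state before calling into the app, tear the session down if the callback fails, and, if the transport started closing while the callback ran, restore the snapshot and replay the close notification. A condensed sketch of that control flow; the types and helpers are illustrative stubs, not the session-layer API:

    typedef enum
    {
      STATE_READY,
      STATE_TRANSPORT_CLOSING,
      STATE_TRANSPORT_CLOSED,
    } state_t;

    typedef struct
    {
      state_t state;
    } session_t;

    /* illustrative stubs standing in for session_close / close notify */
    static void session_close (session_t *s) { (void) s; }
    static void notify_close (session_t *s) { (void) s; }

    static void
    dispatch_accepted (session_t *s, int (*accept_cb) (session_t *))
    {
      state_t old_state = s->state; /* snapshot before the app runs */

      if (accept_cb (s))
        {
          session_close (s); /* app refused: tear the session down */
          return;
        }

      /* transport closed underneath the callback: replay the close */
      if (old_state >= STATE_TRANSPORT_CLOSING)
        {
          s->state = old_state;
          notify_close (s);
        }
    }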
@@ -456,6 +456,7 @@ session_mq_accepted_reply_handler (session_worker_t *wrk,
       a->app_index = mp->context;
       a->handle = mp->handle;
       vnet_disconnect_session (a);
+      s->app_wrk_index = SESSION_INVALID_INDEX;
       return;
     }

@@ -1611,7 +1612,9 @@ session_tx_fifo_dequeue_internal (session_worker_t * wrk,
   clib_llist_index_t ei;
   u32 n_packets;

-  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
+  if (PREDICT_FALSE ((s->session_state >= SESSION_STATE_TRANSPORT_CLOSED) ||
+                     (s->session_state == SESSION_STATE_CONNECTING &&
+                      (s->flags & SESSION_F_HALF_OPEN))))
     return 0;

   /* Clear custom-tx flag used to request reschedule for tx */
@@ -1784,7 +1787,7 @@ session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
       break;
     case SESSION_IO_EVT_RX:
       s = session_event_get_session (wrk, e);
-      if (!s)
+      if (!s || s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
        break;
       transport_app_rx_evt (session_get_transport_proto (s),
                            s->connection_index, s->thread_index);
@@ -163,7 +163,7 @@ vl_api_sr_policy_add_v2_t_handler (vl_api_sr_policy_add_v2_t *mp)
     mp->type, ntohl (mp->fib_table), mp->is_encap, 0, NULL);
   vec_free (segments);

-  REPLY_MACRO (VL_API_SR_POLICY_ADD_REPLY);
+  REPLY_MACRO (VL_API_SR_POLICY_ADD_V2_REPLY);
 }

 static void
@@ -2123,7 +2123,7 @@ tcp46_rcv_process_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        case TCP_STATE_SYN_RCVD:

          /* Make sure the segment is exactly right */
-         if (tc->rcv_nxt != vnet_buffer (b[0])->tcp.seq_number || is_fin)
+         if (tc->rcv_nxt != vnet_buffer (b[0])->tcp.seq_number)
            {
              tcp_send_reset_w_pkt (tc, b[0], thread_index, is_ip4);
              error = TCP_ERROR_SEGMENT_INVALID;
@@ -2143,6 +2143,10 @@ tcp46_rcv_process_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              goto drop;
            }

+         /* Avoid notifying app if connection is about to be closed */
+         if (PREDICT_FALSE (is_fin))
+           break;
+
          /* Update rtt and rto */
          tcp_estimate_initial_rtt (tc);
          tcp_connection_tx_pacer_update (tc);
@@ -2363,15 +2367,15 @@ tcp46_rcv_process_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                            tcp_cfg.closewait_time);
          break;
        case TCP_STATE_SYN_RCVD:
-         /* Send FIN-ACK, enter LAST-ACK and because the app was not
-          * notified yet, set a cleanup timer instead of relying on
-          * disconnect notify and the implicit close call. */
+         /* Send FIN-ACK and enter TIME-WAIT, as opposed to LAST-ACK,
+          * because the app was not notified yet and we want to avoid
+          * session state transitions to ensure cleanup does not
+          * propagate to app. */
          tcp_connection_timers_reset (tc);
          tc->rcv_nxt += 1;
          tcp_send_fin (tc);
-         tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
-         tcp_timer_set (&wrk->timer_wheel, tc, TCP_TIMER_WAITCLOSE,
-                        tcp_cfg.lastack_time);
+         tcp_connection_set_state (tc, TCP_STATE_TIME_WAIT);
+         tcp_program_cleanup (wrk, tc);
          break;
        case TCP_STATE_CLOSE_WAIT:
        case TCP_STATE_CLOSING:
@@ -3238,6 +3242,8 @@ do { \
   _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
     TCP_ERROR_NONE);
   _(FIN_WAIT_2, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+  _ (FIN_WAIT_2, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+     TCP_ERROR_NONE);
   _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
   _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
     TCP_ERROR_NONE);
@@ -667,6 +667,7 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,

   b = vlib_get_buffer (vm, bi);
   tcp_init_buffer (vm, b);
+  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

   /* Make and write options */
   tcp_hdr_len = sizeof (tcp_header_t);
@@ -227,7 +227,12 @@ tls_notify_app_connected (tls_ctx_t * ctx, session_error_t err)
   app_session->opaque = ctx->parent_app_api_context;

   if ((err = app_worker_init_connected (app_wrk, app_session)))
-    goto failed;
+    {
+      app_worker_connect_notify (app_wrk, 0, err, ctx->parent_app_api_context);
+      ctx->no_app_session = 1;
+      session_free (app_session);
+      return -1;
+    }

   app_session->session_state = SESSION_STATE_READY;
   parent_app_api_ctx = ctx->parent_app_api_context;
@@ -244,9 +249,6 @@ tls_notify_app_connected (tls_ctx_t * ctx, session_error_t err)

   return 0;

-failed:
-  ctx->no_app_session = 1;
-  tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
 send_reply:
   return app_worker_connect_notify (app_wrk, 0, err,
                                     ctx->parent_app_api_context);
@@ -486,6 +488,9 @@ tls_session_accept_callback (session_t * tls_session)
    * on tls_session rx and potentially invalidating the session pool */
   app_session = session_alloc (ctx->c_thread_index);
   app_session->session_state = SESSION_STATE_CREATED;
+  app_session->session_type =
+    session_type_from_proto_and_ip (TRANSPORT_PROTO_TLS, ctx->tcp_is_ip4);
+  app_session->connection_index = ctx->tls_ctx_handle;
   ctx->c_s_index = app_session->session_index;

   TLS_DBG (1, "Accept on listener %u new connection [%u]%x",
@@ -511,7 +516,7 @@ tls_app_rx_callback (session_t * tls_session)
     return 0;

   ctx = tls_ctx_get (tls_session->opaque);
-  if (PREDICT_FALSE (ctx->no_app_session))
+  if (PREDICT_FALSE (ctx->no_app_session || ctx->app_closed))
     {
       TLS_DBG (1, "Local App closed");
       return 0;
@@ -938,15 +943,18 @@ tls_cleanup_ho (u32 ho_index)
 int
 tls_custom_tx_callback (void *session, transport_send_params_t * sp)
 {
-  session_t *app_session = (session_t *) session;
+  session_t *as = (session_t *) session;
   tls_ctx_t *ctx;

-  if (PREDICT_FALSE (app_session->session_state
-                     >= SESSION_STATE_TRANSPORT_CLOSED))
-    return 0;
+  if (PREDICT_FALSE (as->session_state >= SESSION_STATE_TRANSPORT_CLOSED ||
+                     as->session_state <= SESSION_STATE_ACCEPTING))
+    {
+      sp->flags |= TRANSPORT_SND_F_DESCHED;
+      return 0;
+    }

-  ctx = tls_ctx_get (app_session->connection_index);
-  return tls_ctx_write (ctx, app_session, sp);
+  ctx = tls_ctx_get (as->connection_index);
+  return tls_ctx_write (ctx, as, sp);
 }

 u8 *
@@ -1057,6 +1065,7 @@ format_tls_half_open (u8 * s, va_list * args)
 {
   u32 ho_index = va_arg (*args, u32);
   u32 __clib_unused thread_index = va_arg (*args, u32);
+  u32 __clib_unused verbose = va_arg (*args, u32);
   session_t *tcp_ho;
   tls_ctx_t *ho_ctx;

@@ -1102,7 +1111,7 @@ tls_enable (vlib_main_t * vm, u8 is_en)
   vnet_app_attach_args_t _a, *a = &_a;
   u64 options[APP_OPTIONS_N_OPTIONS];
   tls_main_t *tm = &tls_main;
-  u32 fifo_size = 128 << 12;
+  u32 fifo_size = 512 << 10;

   if (!is_en)
     {
Some files were not shown because too many files have changed in this diff.