vppinfra: refactor interrupt code

Type: improvement
Change-Id: Ie6987736faf7d8a641762e276775da8ee0c03ea4
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2023-11-03 21:57:42 +00:00
parent 236fae462a
commit 7f75e80f08
11 changed files with 129 additions and 167 deletions

View File

@ -187,9 +187,9 @@ snort_deq_node_interrupt (vlib_main_t *vm, vlib_node_runtime_t *node,
snort_instance_t *si;
int inst = -1;
while ((inst = clib_interrupt_get_next (ptd->interrupts, inst)) != -1)
while ((inst = clib_interrupt_get_next_and_clear (ptd->interrupts, inst)) !=
-1)
{
clib_interrupt_clear (ptd->interrupts, inst);
si = vec_elt_at_index (sm->instances, inst);
qp = vec_elt_at_index (si->qpairs, vm->thread_index);
u32 ready = __atomic_load_n (&qp->ready, __ATOMIC_ACQUIRE);

View File

@ -1471,8 +1471,6 @@ vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
else
cpu_time_now = clib_cpu_time_now ();
nm->pending_interrupts = 0;
/* Pre-allocate expired nodes. */
if (!nm->polling_threshold_vector_length)
nm->polling_threshold_vector_length = 10;
@ -1504,7 +1502,6 @@ vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
while (1)
{
vlib_node_runtime_t *n;
u8 pending_interrupts;
if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
{
@ -1552,19 +1549,14 @@ vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
/* frame */ 0,
cpu_time_now);
pending_interrupts =
__atomic_load_n (&nm->pending_interrupts, __ATOMIC_ACQUIRE);
if (pending_interrupts)
if (clib_interrupt_is_any_pending (nm->pre_input_node_interrupts))
{
int int_num = -1;
nm->pending_interrupts = 0;
while ((int_num = clib_interrupt_get_next (
while ((int_num = clib_interrupt_get_next_and_clear (
nm->pre_input_node_interrupts, int_num)) != -1)
{
vlib_node_runtime_t *n;
clib_interrupt_clear (nm->pre_input_node_interrupts, int_num);
n = vec_elt_at_index (
nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT], int_num);
cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_PRE_INPUT,
@ -1584,15 +1576,14 @@ vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
vm->queue_signal_callback (vm);
if (pending_interrupts)
if (clib_interrupt_is_any_pending (nm->input_node_interrupts))
{
int int_num = -1;
while ((int_num = clib_interrupt_get_next (nm->input_node_interrupts,
int_num)) != -1)
while ((int_num = clib_interrupt_get_next_and_clear (
nm->input_node_interrupts, int_num)) != -1)
{
vlib_node_runtime_t *n;
clib_interrupt_clear (nm->input_node_interrupts, int_num);
n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
int_num);
cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,

View File

@ -692,7 +692,6 @@ typedef struct
/* Node runtime indices for input nodes with pending interrupts. */
void *input_node_interrupts;
void *pre_input_node_interrupts;
volatile u8 pending_interrupts;
/* Input nodes are switched from/to interrupt to/from polling mode
when average vector length goes above/below polling/interrupt

View File

@ -268,8 +268,6 @@ vlib_node_set_interrupt_pending (vlib_main_t *vm, u32 node_index)
clib_interrupt_set_atomic (interrupts, n->runtime_index);
else
clib_interrupt_set (interrupts, n->runtime_index);
__atomic_store_n (&nm->pending_interrupts, 1, __ATOMIC_RELEASE);
}
always_inline vlib_process_t *

View File

@ -250,7 +250,10 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
while (nanosleep (&ts, &tsrem) < 0)
ts = tsrem;
if (*vlib_worker_threads->wait_at_barrier ||
nm->pending_interrupts)
clib_interrupt_is_any_pending (
nm->input_node_interrupts) ||
clib_interrupt_is_any_pending (
nm->pre_input_node_interrupts))
goto done;
}
}

View File

@ -289,10 +289,9 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
{
void *in = rt->rxq_interrupts;
int int_num = -1;
while ((int_num = clib_interrupt_get_next (in, int_num)) !=
-1)
while ((int_num = clib_interrupt_get_next_and_clear (
in, int_num)) != -1)
{
clib_interrupt_clear (in, int_num);
pending_int = clib_bitmap_set (pending_int, int_num, 1);
last_int = clib_max (last_int, int_num);
}

View File

@ -252,14 +252,12 @@ vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
vec_reset_length (rt->rxq_vector_int);
while ((int_num = clib_interrupt_get_next (rt->rxq_interrupts, int_num)) !=
-1)
while ((int_num = clib_interrupt_get_next_and_clear (rt->rxq_interrupts,
int_num)) != -1)
{
vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, int_num);
vnet_hw_if_rxq_poll_vector_t *pv;
clib_interrupt_clear (rt->rxq_interrupts, int_num);
vec_add2 (rt->rxq_vector_int, pv, 1);
pv->dev_instance = rxq->dev_instance;
pv->queue_id = rxq->queue_id;

View File

@ -249,6 +249,7 @@ if(VPP_BUILD_VPPINFRA_TESTS)
fpool
hash
heap
interrupt
longjmp
macros
maplog

View File

@ -1,42 +1,33 @@
/*
* Copyright (c) 2020 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/interrupt.h>
#include <vppinfra/format.h>
__clib_export void
clib_interrupt_init (void **data, uword n_int)
clib_interrupt_init (void **data, u32 n_int)
{
clib_interrupt_header_t *h;
uword sz = sizeof (clib_interrupt_header_t);
uword data_size = round_pow2 (n_int, CLIB_CACHE_LINE_BYTES * 8) / 8;
const u32 bits_in_cl = 8 << CLIB_LOG2_CACHE_LINE_BYTES;
u32 sz = sizeof (clib_interrupt_header_t);
u32 n_cl = round_pow2 (n_int, bits_in_cl) / bits_in_cl;
sz += 2 * data_size;
sz += 2 * n_cl * CLIB_CACHE_LINE_BYTES;
h = data[0] = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
clib_memset (data[0], 0, sz);
h->n_int = n_int;
h->n_uword_alloc = (data_size * 8) >> log2_uword_bits;
h->uwords_allocated = n_cl * bits_in_cl / uword_bits;
h->uwords_used = round_pow2 (n_int, uword_bits) / uword_bits;
h->local = (uword *) (h + 1);
h->remote = h->local + h->uwords_allocated;
}
__clib_export void
clib_interrupt_resize (void **data, uword n_int)
clib_interrupt_resize (void **data, u32 n_int)
{
clib_interrupt_header_t *h = data[0];
u32 new_n_uwords, i;
if (data[0] == 0)
{
@ -44,48 +35,37 @@ clib_interrupt_resize (void **data, uword n_int)
return;
}
if (n_int < h->n_int)
if (n_int == h->n_int)
return;
new_n_uwords = round_pow2 (n_int, uword_bits) / uword_bits;
if (new_n_uwords > h->uwords_allocated)
{
uword *old_bmp, *old_abp, v;
old_bmp = clib_interrupt_get_bitmap (data[0]);
old_abp = clib_interrupt_get_atomic_bitmap (data[0]);
for (uword i = 0; i < h->n_uword_alloc; i++)
{
v = old_abp[i];
old_abp[i] = 0;
if (n_int > ((i + 1) * uword_bits))
old_bmp[i] |= v;
else if (n_int > (i * uword_bits))
old_bmp[i] = (old_bmp[i] | v) & pow2_mask (n_int - i * uword_bits);
else
old_bmp[i] = 0;
}
clib_interrupt_header_t *nh;
clib_interrupt_init ((void **) &nh, n_int);
for (int i = 0; i < h->uwords_used; i++)
nh->local[i] = h->local[i] | h->remote[i];
clib_mem_free (data[0]);
data[0] = nh;
return;
}
else if (n_int > h->n_uword_alloc * uword_bits)
{
void *old = data[0];
uword *old_bmp, *old_abp, *new_bmp;
uword n_uwords = round_pow2 (h->n_int, uword_bits) / uword_bits;
clib_interrupt_init (data, n_int);
h = data[0];
new_bmp = clib_interrupt_get_bitmap (data[0]);
old_bmp = clib_interrupt_get_bitmap (old);
old_abp = clib_interrupt_get_atomic_bitmap (old);
for (uword i = 0; i < n_uwords; i++)
new_bmp[i] = old_bmp[i] | old_abp[i];
clib_mem_free (old);
}
h->n_int = n_int;
h->uwords_used = new_n_uwords;
for (i = 0; i < new_n_uwords; i++)
h->local[i] |= h->remote[i];
for (i = 0; i < h->uwords_allocated; i++)
h->remote[i] = 0;
for (i = new_n_uwords; i < h->uwords_allocated; i++)
h->local[i] = 0;
n_int &= pow2_mask (log2_uword_bits);
if (n_int)
h->local[n_int >> log2_uword_bits] &= pow2_mask (n_int);
}
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/

View File

@ -1,16 +1,5 @@
/*
* Copyright (c) 2020 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef included_clib_interrupt_h
@ -22,12 +11,15 @@
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
int n_int;
uword n_uword_alloc;
u32 n_int;
u32 uwords_allocated;
u32 uwords_used;
uword *local;
uword *remote;
} clib_interrupt_header_t;
void clib_interrupt_init (void **data, uword n_interrupts);
void clib_interrupt_resize (void **data, uword n_interrupts);
void clib_interrupt_init (void **data, u32 n_interrupts);
void clib_interrupt_resize (void **data, u32 n_interrupts);
static_always_inline void
clib_interrupt_free (void **data)
@ -48,96 +40,98 @@ clib_interrupt_get_n_int (void *d)
return 0;
}
static_always_inline uword *
clib_interrupt_get_bitmap (void *d)
{
return d + sizeof (clib_interrupt_header_t);
}
static_always_inline uword *
clib_interrupt_get_atomic_bitmap (void *d)
{
clib_interrupt_header_t *h = d;
return clib_interrupt_get_bitmap (d) + h->n_uword_alloc;
}
static_always_inline void
clib_interrupt_set (void *in, int int_num)
{
uword *bmp = clib_interrupt_get_bitmap (in);
uword mask = 1ULL << (int_num & (uword_bits - 1));
bmp += int_num >> log2_uword_bits;
clib_interrupt_header_t *h = in;
u32 off = int_num >> log2_uword_bits;
uword bit = 1ULL << (int_num & pow2_mask (log2_uword_bits));
ASSERT (int_num < ((clib_interrupt_header_t *) in)->n_int);
ASSERT (int_num < h->n_int);
*bmp |= mask;
h->local[off] |= bit;
}
static_always_inline void
clib_interrupt_set_atomic (void *in, int int_num)
{
uword *bmp = clib_interrupt_get_atomic_bitmap (in);
uword mask = 1ULL << (int_num & (uword_bits - 1));
bmp += int_num >> log2_uword_bits;
clib_interrupt_header_t *h = in;
u32 off = int_num >> log2_uword_bits;
uword bit = 1ULL << (int_num & pow2_mask (log2_uword_bits));
ASSERT (int_num < ((clib_interrupt_header_t *) in)->n_int);
ASSERT (int_num < h->n_int);
__atomic_fetch_or (bmp, mask, __ATOMIC_RELAXED);
__atomic_fetch_or (h->remote + off, bit, __ATOMIC_RELAXED);
}
static_always_inline void
clib_interrupt_clear (void *in, int int_num)
{
uword *bmp = clib_interrupt_get_bitmap (in);
uword *abm = clib_interrupt_get_atomic_bitmap (in);
uword mask = 1ULL << (int_num & (uword_bits - 1));
uword off = int_num >> log2_uword_bits;
clib_interrupt_header_t *h = in;
u32 off = int_num >> log2_uword_bits;
uword bit = 1ULL << (int_num & pow2_mask (log2_uword_bits));
uword *loc = h->local;
uword *rem = h->remote;
uword v;
ASSERT (int_num < ((clib_interrupt_header_t *) in)->n_int);
ASSERT (int_num < h->n_int);
bmp[off] |= __atomic_exchange_n (abm + off, 0, __ATOMIC_SEQ_CST);
bmp[off] &= ~mask;
v = loc[off] | __atomic_exchange_n (rem + off, 0, __ATOMIC_SEQ_CST);
loc[off] = v & ~bit;
}
static_always_inline int
clib_interrupt_get_next (void *in, int last)
clib_interrupt_get_next_and_clear (void *in, int last)
{
uword *bmp = clib_interrupt_get_bitmap (in);
uword *abm = clib_interrupt_get_atomic_bitmap (in);
clib_interrupt_header_t *h = in;
uword bmp_uword, off;
uword bit, v;
uword *loc = h->local;
uword *rem = h->remote;
u32 off, n_uwords = h->uwords_used;
ASSERT (last >= -1 && last < h->n_int);
ASSERT (last >= -1 && last < (int) h->n_int);
off = (last + 1) >> log2_uword_bits;
if (off > h->n_int >> log2_uword_bits || off >= h->n_uword_alloc)
if (off >= n_uwords)
return -1;
last -= off << log2_uword_bits;
bmp[off] |= __atomic_exchange_n (abm + off, 0, __ATOMIC_SEQ_CST);
bmp_uword = bmp[off] & ~pow2_mask (last + 1);
v = loc[off] | __atomic_exchange_n (rem + off, 0, __ATOMIC_SEQ_CST);
loc[off] = v;
next:
if (bmp_uword)
return (off << log2_uword_bits) + count_trailing_zeros (bmp_uword);
v &= ~pow2_mask ((last + 1) & pow2_mask (log2_uword_bits));
off++;
while (v == 0)
{
if (++off == n_uwords)
return -1;
if (off > h->n_int >> log2_uword_bits || off >= h->n_uword_alloc)
return -1;
v = loc[off] | __atomic_exchange_n (rem + off, 0, __ATOMIC_SEQ_CST);
loc[off] = v;
}
bmp[off] |= __atomic_exchange_n (abm + off, 0, __ATOMIC_SEQ_CST);
bmp_uword = bmp[off];
bit = get_lowest_set_bit (v);
loc[off] &= ~bit;
return get_lowest_set_bit_index (bit) + (int) (off << log2_uword_bits);
}
goto next;
/* Return non-zero if any interrupt is pending in this interrupt set,
 * zero otherwise.  Non-destructive: no bits are cleared or migrated.
 * Checks both the thread-local bitmap ('local', set via plain OR in
 * clib_interrupt_set) and the cross-thread bitmap ('remote', set via
 * __atomic_fetch_or in clib_interrupt_set_atomic).
 * NOTE(review): 'remote' words are read with plain loads here, not
 * __atomic_load_n — presumably a best-effort peek where a momentarily
 * stale answer is acceptable (callers re-check under the consuming
 * iterator); confirm this is intentional. */
static_always_inline int
clib_interrupt_is_any_pending (void *in)
{
clib_interrupt_header_t *h = in;
u32 n_uwords = h->uwords_used; /* only words covering n_int are scanned */
uword *loc = h->local;
uword *rem = h->remote;
/* Scan local words first: cheapest, no cross-thread traffic. */
for (u32 i = 0; i < n_uwords; i++)
if (loc[i])
return 1;
/* Then the remotely-set words. */
for (u32 i = 0; i < n_uwords; i++)
if (rem[i])
return 1;
return 0;
}
#endif /* included_clib_interrupt_h */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/

View File

@ -33,10 +33,7 @@ int debug = 0;
void
set_and_check_bits (void *interrupts, int num_ints)
{
int step;
for (step = 1; step < num_ints; step++)
for (int step = 1; step < num_ints; step++)
{
int int_num = -1;
int expected = 0;
@ -48,13 +45,15 @@ set_and_check_bits (void *interrupts, int num_ints)
clib_interrupt_set (interrupts, i);
}
while ((int_num = clib_interrupt_get_next (interrupts, int_num)) != -1)
while ((int_num =
clib_interrupt_get_next_and_clear (interrupts, int_num)) != -1)
{
debug (" Got %d, expecting %d\n", int_num, expected);
ASSERT (int_num == expected);
expected += step;
clib_interrupt_clear (interrupts, int_num);
}
int_num = clib_interrupt_get_next_and_clear (interrupts, -1);
ASSERT (int_num == -1);
}
}