Type: improvement

Previously, the multiple-software-crypto-scheduler-queues-per-core design led to an uneven frame processing rate across async op IDs: the lower the op ID, the more likely its frames were processed first. For example, when an RX core feeds both encryption and decryption jobs of the same crypto algorithm into the queues at a high rate, and the crypto cores do not have enough cycles to process them all, the jobs in the decryption queue are less likely to be processed, causing packet drops.

To improve the situation, this patch makes every core own only two queues, one for encrypt operations and one for decrypt. The served queue type is switched either after checking each core or after finding a frame to process. Crypto jobs of all algorithms are pushed to these two queues and are treated evenly.

In addition, the crypto async infrastructure now uses a unified dequeue handler, one per engine; only the active engine has its dequeue handler registered in crypto main.

Signed-off-by: DariuszX Kazimierski <dariuszx.kazimierski@intel.com>
Signed-off-by: PiotrX Kleski <piotrx.kleski@intel.com>
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Jakub Wysocki <jakubx.wysocki@intel.com>

Change-Id: I517ee8e31633980de5e0dd4b05e1d5db5dea760e
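To make the scheme above concrete, below is a minimal, self-contained sketch of the intended dequeue pattern: each worker core owns one encrypt and one decrypt queue, and a crypto thread round-robins over the cores, flipping the queue type it serves after it finds a frame or after a full empty pass over all cores. Every name in the sketch (demo_queue_t, demo_serve_next, and so on) is invented for this illustration and does not appear in the plugin; the real code operates on vnet_crypto_async_frame_t frames via the structures declared in the header that follows.

/* Minimal sketch of the two-queue-per-core round-robin dequeue.
 * All names are illustrative only; none come from the VPP sources. */
#include <stdio.h>
#include <string.h>

#define N_CORES    4
#define QUEUE_SIZE 64			/* must stay a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

typedef enum { DEMO_ENCRYPT = 0, DEMO_DECRYPT, DEMO_N_TYPES } demo_queue_type_t;

typedef struct
{
  unsigned head;			/* consumer index, free running */
  unsigned tail;			/* producer index, free running */
  int jobs[QUEUE_SIZE];			/* stand-in for async frames */
} demo_queue_t;

typedef struct
{
  demo_queue_t queue[DEMO_N_TYPES];	/* one encrypt + one decrypt queue */
} demo_core_t;

static demo_core_t cores[N_CORES];

/* Pop one job from a queue; returns 0 if the queue is empty. */
static int
demo_dequeue (demo_queue_t * q, int *job)
{
  if (q->head == q->tail)
    return 0;
  *job = q->jobs[q->head & QUEUE_MASK];
  q->head++;
  return 1;
}

/* One scheduling step: starting from the core served last, visit every
 * core once, looking only at the queue type served last.  Flip the queue
 * type after serving a frame, or after a full empty pass, so encrypt and
 * decrypt jobs are treated evenly regardless of their op IDs. */
static int
demo_serve_next (unsigned *last_core, demo_queue_type_t * last_type, int *job)
{
  unsigned i;

  for (i = 0; i < N_CORES; i++)
    {
      unsigned core = (*last_core + i) % N_CORES;
      if (demo_dequeue (&cores[core].queue[*last_type], job))
	{
	  *last_core = core;
	  *last_type = *last_type == DEMO_ENCRYPT ? DEMO_DECRYPT : DEMO_ENCRYPT;
	  return 1;
	}
    }

  /* nothing found for this queue type: switch type for the next pass */
  *last_type = *last_type == DEMO_ENCRYPT ? DEMO_DECRYPT : DEMO_ENCRYPT;
  return 0;
}

int
main (void)
{
  unsigned last_core = 0;
  demo_queue_type_t last_type = DEMO_ENCRYPT;
  int i, job;

  memset (cores, 0, sizeof (cores));

  /* core 0 enqueues two encrypt jobs and two decrypt jobs */
  for (i = 0; i < 2; i++)
    {
      demo_queue_t *enc = &cores[0].queue[DEMO_ENCRYPT];
      demo_queue_t *dec = &cores[0].queue[DEMO_DECRYPT];
      enc->jobs[enc->tail++ & QUEUE_MASK] = 100 + i;
      dec->jobs[dec->tail++ & QUEUE_MASK] = 200 + i;
    }

  while (demo_serve_next (&last_core, &last_type, &job))
    printf ("served job %d\n", job);	/* prints 100, 200, 101, 201 */

  return 0;
}

With per-op-ID queues, the decrypt jobs (200, 201) would only be served once the encrypt queue was drained; with the two-queue scheme they interleave with the encrypt jobs.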
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/crypto/crypto.h>

#ifndef __crypto_sw_scheduler_h__
#define __crypto_sw_scheduler_h__

#define CRYPTO_SW_SCHEDULER_QUEUE_SIZE 64
#define CRYPTO_SW_SCHEDULER_QUEUE_MASK (CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1)

STATIC_ASSERT ((0 == (CRYPTO_SW_SCHEDULER_QUEUE_SIZE &
		      (CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1))),
	       "CRYPTO_SW_SCHEDULER_QUEUE_SIZE is not pow2");

typedef enum crypto_sw_scheduler_queue_type_t_
{
  CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT = 0,
  CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT,
  CRYPTO_SW_SCHED_QUEUE_N_TYPES
} crypto_sw_scheduler_queue_type_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 head;
  u32 tail;
  vnet_crypto_async_frame_t **jobs;
} crypto_sw_scheduler_queue_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  crypto_sw_scheduler_queue_t queue[CRYPTO_SW_SCHED_QUEUE_N_TYPES];
  u32 last_serve_lcore_id;
  u8 last_serve_encrypt;
  u8 last_return_queue;
  vnet_crypto_op_t *crypto_ops;
  vnet_crypto_op_t *integ_ops;
  vnet_crypto_op_t *chained_crypto_ops;
  vnet_crypto_op_t *chained_integ_ops;
  vnet_crypto_op_chunk_t *chunks;
  u8 self_crypto_enabled;
} crypto_sw_scheduler_per_thread_data_t;

typedef struct
{
  u32 crypto_engine_index;
  crypto_sw_scheduler_per_thread_data_t *per_thread_data;
  vnet_crypto_key_t *keys;
} crypto_sw_scheduler_main_t;

extern crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

extern int crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled);

extern clib_error_t *crypto_sw_scheduler_api_init (vlib_main_t * vm);

#endif // __crypto_sw_scheduler_h__

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
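A note on the CRYPTO_SW_SCHEDULER_QUEUE_MASK define and the power-of-two STATIC_ASSERT above: together they suggest the usual masked-index ring pattern, where free-running head/tail counters are reduced with a bit mask instead of a modulo. The tiny standalone check below illustrates the invariant being guarded; all names in it are invented here and are not part of the plugin.

#include <assert.h>
#include <stdint.h>

#define DEMO_QUEUE_SIZE 64		/* power of two, mirroring CRYPTO_SW_SCHEDULER_QUEUE_SIZE */
#define DEMO_QUEUE_MASK (DEMO_QUEUE_SIZE - 1)

int
main (void)
{
  uint32_t i;

  /* For a power-of-two size, (i & mask) equals (i % size) for every i,
   * including after a u32 counter wraps, so the ring can be indexed
   * without a division.  A size such as 60 breaks this equivalence,
   * which is what the STATIC_ASSERT in the header guards against. */
  for (i = 0; i < 4 * DEMO_QUEUE_SIZE; i++)
    assert ((i & DEMO_QUEUE_MASK) == (i % DEMO_QUEUE_SIZE));

  return 0;
}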