GBP: redirect contracts

Change-Id: I463b153de93cfec29a9c15e8e84e41f6003d4c5f
Signed-off-by: Neale Ranns <nranns@cisco.com>
This commit is contained in:
Neale Ranns
2018-11-07 09:25:54 -08:00
committed by Damjan Marion
parent 96e2d4407b
commit 13a08cc098
23 changed files with 2648 additions and 622 deletions

View File

@ -39,7 +39,7 @@ create_cmd::operator==(const create_cmd& other) const
rc_t
create_cmd::issue(connection& con)
{
msg_t req(con.ctx(), std::ref(*this));
msg_t req(con.ctx(), 1, std::ref(*this));
auto& payload = req.get_request().get_payload();
payload.is_add = 1;
@ -82,7 +82,7 @@ delete_cmd::operator==(const delete_cmd& other) const
rc_t
delete_cmd::issue(connection& con)
{
msg_t req(con.ctx(), std::ref(*this));
msg_t req(con.ctx(), 1, std::ref(*this));
auto& payload = req.get_request().get_payload();
payload.is_add = 0;

View File

@ -251,11 +251,47 @@ define gbp_subnet_details
vl_api_gbp_subnet_t subnet;
};
typeonly define gbp_contract
typedef gbp_next_hop
{
vl_api_address_t ip;
vl_api_mac_address_t mac;
u32 bd_id;
u32 rd_id;
};
enum gbp_hash_mode
{
GBP_API_HASH_MODE_SRC_IP,
GBP_API_HASH_MODE_DST_IP,
};
typedef gbp_next_hop_set
{
vl_api_gbp_hash_mode_t hash_mode;
u8 n_nhs;
vl_api_gbp_next_hop_t nhs[8];
};
enum gbp_rule_action
{
GBP_API_RULE_PERMIT,
GBP_API_RULE_DENY,
GBP_API_RULE_REDIRECT,
};
typedef gbp_rule
{
vl_api_gbp_rule_action_t action;
vl_api_gbp_next_hop_set_t nh_set;
};
typedef gbp_contract
{
u16 src_epg;
u16 dst_epg;
u32 acl_index;
u8 n_rules;
vl_api_gbp_rule_t rules[n_rules];
};
autoreply define gbp_contract_add_del

View File

@ -156,16 +156,21 @@ vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
ip_address_decode (&mp->endpoint.tun.src, &tun_src);
ip_address_decode (&mp->endpoint.tun.dst, &tun_dst);
rv = gbp_endpoint_update (sw_if_index, ips, &mac,
ntohs (mp->endpoint.epg_id),
gef, &tun_src, &tun_dst, &handle);
rv = gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
sw_if_index, ips, &mac,
INDEX_INVALID, INDEX_INVALID,
ntohs (mp->endpoint.epg_id),
gef, &tun_src, &tun_dst, &handle);
}
else
{
rv = gbp_endpoint_update (sw_if_index, ips, &mac,
ntohs (mp->endpoint.epg_id),
gef, NULL, NULL, &handle);
rv = gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
sw_if_index, ips, &mac,
INDEX_INVALID, INDEX_INVALID,
ntohs (mp->endpoint.epg_id),
gef, NULL, NULL, &handle);
}
vec_free (ips);
BAD_SW_IF_INDEX_LABEL;
/* *INDENT-OFF* */
@ -182,7 +187,7 @@ vl_api_gbp_endpoint_del_t_handler (vl_api_gbp_endpoint_del_t * mp)
vl_api_gbp_endpoint_del_reply_t *rmp;
int rv = 0;
gbp_endpoint_delete (ntohl (mp->handle));
gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, ntohl (mp->handle));
REPLY_MACRO (VL_API_GBP_ENDPOINT_DEL_REPLY + GBP_MSG_BASE);
}
@ -210,6 +215,8 @@ static walk_rc_t
gbp_endpoint_send_details (index_t gei, void *args)
{
vl_api_gbp_endpoint_details_t *mp;
gbp_endpoint_loc_t *gel;
gbp_endpoint_fwd_t *gef;
gbp_endpoint_t *ge;
gbp_walk_ctx_t *ctx;
u8 n_ips, ii;
@ -217,7 +224,7 @@ gbp_endpoint_send_details (index_t gei, void *args)
ctx = args;
ge = gbp_endpoint_get (gei);
n_ips = vec_len (ge->ge_ips);
n_ips = vec_len (ge->ge_key.gek_ips);
mp = vl_msg_api_alloc (sizeof (*mp) + (sizeof (*mp->endpoint.ips) * n_ips));
if (!mp)
return 1;
@ -226,28 +233,32 @@ gbp_endpoint_send_details (index_t gei, void *args)
mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_DETAILS + GBP_MSG_BASE);
mp->context = ctx->context;
gel = &ge->ge_locs[0];
gef = &ge->ge_fwd;
if (gbp_endpoint_is_remote (ge))
{
mp->endpoint.sw_if_index = ntohl (ge->tun.ge_parent_sw_if_index);
ip_address_encode (&ge->tun.ge_src, IP46_TYPE_ANY,
mp->endpoint.sw_if_index = ntohl (gel->tun.gel_parent_sw_if_index);
ip_address_encode (&gel->tun.gel_src, IP46_TYPE_ANY,
&mp->endpoint.tun.src);
ip_address_encode (&ge->tun.ge_dst, IP46_TYPE_ANY,
ip_address_encode (&gel->tun.gel_dst, IP46_TYPE_ANY,
&mp->endpoint.tun.dst);
}
else
{
mp->endpoint.sw_if_index = ntohl (ge->ge_sw_if_index);
mp->endpoint.sw_if_index = ntohl (gef->gef_itf);
}
mp->endpoint.epg_id = ntohs (ge->ge_epg_id);
mp->endpoint.epg_id = ntohs (ge->ge_fwd.gef_epg_id);
mp->endpoint.n_ips = n_ips;
mp->endpoint.flags = gbp_endpoint_flags_encode (ge->ge_flags);
mp->endpoint.flags = gbp_endpoint_flags_encode (gef->gef_flags);
mp->handle = htonl (gei);
mp->age = vlib_time_now (vlib_get_main ()) - ge->ge_last_time;
mac_address_encode (&ge->ge_mac, &mp->endpoint.mac);
mac_address_encode (&ge->ge_key.gek_mac, &mp->endpoint.mac);
vec_foreach_index (ii, ge->ge_ips)
vec_foreach_index (ii, ge->ge_key.gek_ips)
{
ip_address_encode (&ge->ge_ips[ii], IP46_TYPE_ANY, &mp->endpoint.ips[ii]);
ip_address_encode (&ge->ge_key.gek_ips[ii].fp_addr,
IP46_TYPE_ANY, &mp->endpoint.ips[ii]);
}
vl_api_send_msg (ctx->reg, (u8 *) mp);
@ -672,20 +683,186 @@ vl_api_gbp_recirc_dump_t_handler (vl_api_gbp_recirc_dump_t * mp)
gbp_recirc_walk (gbp_recirc_send_details, &ctx);
}
/**
 * Translate a GBP rule action from its API (network order) encoding to
 * the internal encoding.
 * (function name preserves an upstream typo: "deocde")
 *
 * @param in  API action value, network byte order
 * @param out [out] decoded internal action
 * @return 0 on success, -1 if the value is not a known action
 */
static int
gbp_contract_rule_action_deocde (vl_api_gbp_rule_action_t in,
				 gbp_rule_action_t * out)
{
  const u32 action = clib_net_to_host_u32 (in);

  if (GBP_API_RULE_PERMIT == action)
    *out = GBP_RULE_PERMIT;
  else if (GBP_API_RULE_DENY == action)
    *out = GBP_RULE_DENY;
  else if (GBP_API_RULE_REDIRECT == action)
    *out = GBP_RULE_REDIRECT;
  else
    return (-1);

  return (0);
}
/**
 * Translate a GBP hash-mode from its API (network order) encoding to
 * the internal encoding.
 *
 * @param in  API hash-mode value, network byte order
 * @param out [out] decoded internal hash-mode
 * @return 0 on success, -2 if the value is not a known hash-mode
 */
static int
gbp_hash_mode_decode (vl_api_gbp_hash_mode_t in, gbp_hash_mode_t * out)
{
  switch (clib_net_to_host_u32 (in))
    {
    case GBP_API_HASH_MODE_SRC_IP:
      *out = GBP_HASH_MODE_SRC_IP;
      return (0);
    case GBP_API_HASH_MODE_DST_IP:
      *out = GBP_HASH_MODE_DST_IP;
      return (0);
    default:
      return (-2);
    }
}
/**
 * Decode an API next-hop into an internal next-hop object.
 *
 * Takes a lock on both the referenced bridge-domain and route-domain;
 * on success those locks are held by the allocated next-hop.
 *
 * @param in   API next-hop (bd_id/rd_id in network byte order)
 * @param gnhi [out] index of the allocated next-hop
 * @return 0 on success, VNET_API_ERROR_* if a domain lookup fails
 */
static int
gbp_next_hop_decode (const vl_api_gbp_next_hop_t * in, index_t * gnhi)
{
  ip46_address_t ip;
  mac_address_t mac;
  index_t grd, gbd;

  gbd = gbp_bridge_domain_find_and_lock (ntohl (in->bd_id));

  if (INDEX_INVALID == gbd)
    return (VNET_API_ERROR_BD_NOT_MODIFIABLE);

  grd = gbp_route_domain_find_and_lock (ntohl (in->rd_id));

  if (INDEX_INVALID == grd)
    {
      /* drop the bridge-domain lock taken above, otherwise the BD's
       * lock count is leaked on this error path */
      gbp_bridge_domain_unlock (gbd);
      return (VNET_API_ERROR_NO_SUCH_FIB);
    }

  ip_address_decode (&in->ip, &ip);
  mac_address_decode (&in->mac, &mac);

  *gnhi = gbp_next_hop_alloc (&ip, grd, &mac, gbd);

  return (0);
}
/**
 * Decode an API next-hop set: the hash-mode plus a vector of next-hops.
 *
 * @param in        API next-hop set
 * @param hash_mode [out] decoded hash-mode
 * @param out       [out] vector of decoded next-hop indices
 * @return 0 on success, non-zero decode error otherwise
 */
static int
gbp_next_hop_set_decode (const vl_api_gbp_next_hop_set_t * in,
gbp_hash_mode_t * hash_mode, index_t ** out)
{
index_t *gnhis = NULL;
int rv;
u8 ii;
rv = gbp_hash_mode_decode (in->hash_mode, hash_mode);
if (0 != rv)
return rv;
/* pre-size the vector, one slot per next-hop; when n_nhs is 0 the
* index is -1 which makes vec_validate a no-op -- TODO confirm against
* vppinfra vec.h */
vec_validate (gnhis, in->n_nhs - 1);
for (ii = 0; ii < in->n_nhs; ii++)
{
rv = gbp_next_hop_decode (&in->nhs[ii], &gnhis[ii]);
if (0 != rv)
{
/* NOTE(review): next-hops decoded before the failing one are not
* released here -- presumably leaked; verify cleanup elsewhere */
vec_free (gnhis);
break;
}
}
/* on the failure path vec_free() is expected to have NULL'd gnhis, so
* *out is NULL -- confirm vec_free zeroes its argument */
*out = gnhis;
return (rv);
}
/**
 * Decode one API contract rule, allocating the internal rule object.
 *
 * @param in  API rule
 * @param gui [out] index of the allocated gbp_rule_t
 * @return 0 on success, non-zero decode error otherwise
 */
static int
gbp_contract_rule_decode (const vl_api_gbp_rule_t * in, index_t * gui)
{
gbp_hash_mode_t hash_mode;
gbp_rule_action_t action;
index_t *nhs = NULL;
int rv;
rv = gbp_contract_rule_action_deocde (in->action, &action);
if (0 != rv)
return rv;
if (GBP_RULE_REDIRECT == action)
{
/* only a redirect rule carries a next-hop set */
rv = gbp_next_hop_set_decode (&in->nh_set, &hash_mode, &nhs);
if (0 != rv)
return (rv);
}
else
{
/* permit/deny rules have no next-hops; the hash-mode is unused so
* pick a benign default */
hash_mode = GBP_HASH_MODE_SRC_IP;
}
*gui = gbp_rule_alloc (action, hash_mode, nhs);
return (rv);
}
/**
 * Decode the array of API rules carried in a contract add request into a
 * vector of internal rule indices.
 *
 * @param n_rules number of rules in the API array
 * @param rules   the API rule array
 * @param out     [out] vector of decoded rule indices; NULL when n_rules
 *                is 0; untouched on decode failure
 * @return 0 on success, the first non-zero decode error otherwise
 */
static int
gbp_contract_rules_decode (u8 n_rules,
			   const vl_api_gbp_rule_t * rules, index_t ** out)
{
  index_t *decoded = NULL;
  int rv = 0;
  u8 pos;

  /* an empty rule-set decodes to an empty (NULL) vector */
  if (0 == n_rules)
    {
      *out = NULL;
      return (0);
    }

  vec_validate (decoded, n_rules - 1);

  for (pos = 0; pos < n_rules; pos++)
    {
      rv = gbp_contract_rule_decode (&rules[pos], &decoded[pos]);

      if (0 != rv)
	{
	  /* abandon the partially filled vector on the first failure */
	  vec_free (decoded);
	  return (rv);
	}
    }

  *out = decoded;
  return (rv);
}
static void
vl_api_gbp_contract_add_del_t_handler (vl_api_gbp_contract_add_del_t * mp)
{
vl_api_gbp_contract_add_del_reply_t *rmp;
index_t *rules;
int rv = 0;
if (mp->is_add)
gbp_contract_update (ntohs (mp->contract.src_epg),
ntohs (mp->contract.dst_epg),
ntohl (mp->contract.acl_index));
else
gbp_contract_delete (ntohs (mp->contract.src_epg),
ntohs (mp->contract.dst_epg));
{
rv = gbp_contract_rules_decode (mp->contract.n_rules,
mp->contract.rules, &rules);
if (0 != rv)
goto out;
rv = gbp_contract_update (ntohs (mp->contract.src_epg),
ntohs (mp->contract.dst_epg),
ntohl (mp->contract.acl_index), rules);
}
else
rv = gbp_contract_delete (ntohs (mp->contract.src_epg),
ntohs (mp->contract.dst_epg));
out:
REPLY_MACRO (VL_API_GBP_CONTRACT_ADD_DEL_REPLY + GBP_MSG_BASE);
}
@ -706,7 +883,7 @@ gbp_contract_send_details (gbp_contract_t * gbpc, void *args)
mp->contract.src_epg = ntohs (gbpc->gc_key.gck_src);
mp->contract.dst_epg = ntohs (gbpc->gc_key.gck_dst);
mp->contract.acl_index = ntohl (gbpc->gc_value.gc_acl_index);
// mp->contract.acl_index = ntohl (gbpc->gc_value.gc_acl_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);

View File

@ -41,6 +41,12 @@ vlib_log_class_t gb_logger;
#define GBP_BD_DBG(...) \
vlib_log_debug (gb_logger, __VA_ARGS__);
/**
 * Return the pool index of a bridge-domain object.
 *
 * Derived by pointer arithmetic against the pool base, so @a gbd must
 * point at an element of gbp_bridge_domain_pool.
 */
index_t
gbp_bridge_domain_index (const gbp_bridge_domain_t * gbd)
{
return (gbd - gbp_bridge_domain_pool);
}
static void
gbp_bridge_domain_lock (index_t i)
{
@ -96,6 +102,38 @@ gbp_bridge_domain_db_remove (gbp_bridge_domain_t * gb)
gbp_bridge_domain_db.gbd_by_bd_index[gb->gb_bd_index] = INDEX_INVALID;
}
/**
 * Format a bridge-domain from an object pointer.
 *
 * Prints the pool index, BD id/index, BVI and uu-flood interface names
 * and the lock count; prints "NULL" for a NULL pointer.
 */
static u8 *
format_gbp_bridge_domain_ptr (u8 * s, va_list * args)
{
gbp_bridge_domain_t *gb = va_arg (*args, gbp_bridge_domain_t *);
vnet_main_t *vnm = vnet_get_main ();
if (NULL != gb)
s = format (s, "[%d] bd:[%d,%d], bvi:%U uu-flood:%U locks:%d",
gb - gbp_bridge_domain_pool,
gb->gb_bd_id,
gb->gb_bd_index,
format_vnet_sw_if_index_name, vnm, gb->gb_bvi_sw_if_index,
format_vnet_sw_if_index_name, vnm, gb->gb_uu_fwd_sw_if_index,
gb->gb_locks);
else
s = format (s, "NULL");
return (s);
}
/**
 * Format a bridge-domain from its pool index; delegates to the
 * pointer-based formatter after resolving the index.
 */
u8 *
format_gbp_bridge_domain (u8 * s, va_list * args)
{
  index_t gbi = va_arg (*args, index_t);

  return (format (s, "%U", format_gbp_bridge_domain_ptr,
		  gbp_bridge_domain_get (gbi)));
}
int
gbp_bridge_domain_add_and_lock (u32 bd_id,
gbp_bridge_domain_flags_t flags,
@ -158,7 +196,7 @@ gbp_bridge_domain_add_and_lock (u32 bd_id,
gb->gb_locks++;
}
GBP_BD_DBG ("add: %U", format_gbp_bridge_domain, gb);
GBP_BD_DBG ("add: %U", format_gbp_bridge_domain_ptr, gb);
return (0);
}
@ -174,7 +212,7 @@ gbp_bridge_domain_unlock (index_t index)
if (0 == gb->gb_locks)
{
GBP_BD_DBG ("destroy: %U", format_gbp_bridge_domain, gb);
GBP_BD_DBG ("destroy: %U", format_gbp_bridge_domain_ptr, gb);
l2fib_del_entry (vnet_sw_interface_get_hw_address
(vnet_get_main (), gb->gb_bvi_sw_if_index),
@ -204,8 +242,7 @@ gbp_bridge_domain_delete (u32 bd_id)
if (INDEX_INVALID != gbi)
{
GBP_BD_DBG ("del: %U", format_gbp_bridge_domain,
gbp_bridge_domain_get (gbi));
GBP_BD_DBG ("del: %U", format_gbp_bridge_domain, gbi);
gbp_bridge_domain_unlock (gbi);
return (0);
@ -287,33 +324,13 @@ VLIB_CLI_COMMAND (gbp_bridge_domain_cli_node, static) = {
.function = gbp_bridge_domain_cli,
};
u8 *
format_gbp_bridge_domain (u8 * s, va_list * args)
{
gbp_bridge_domain_t *gb = va_arg (*args, gbp_bridge_domain_t*);
vnet_main_t *vnm = vnet_get_main ();
if (NULL != gb)
s = format (s, "[%d] bd:[%d,%d], bvi:%U uu-flood:%U locks:%d",
gb - gbp_bridge_domain_pool,
gb->gb_bd_id,
gb->gb_bd_index,
format_vnet_sw_if_index_name, vnm, gb->gb_bvi_sw_if_index,
format_vnet_sw_if_index_name, vnm, gb->gb_uu_fwd_sw_if_index,
gb->gb_locks);
else
s = format (s, "NULL");
return (s);
}
static int
gbp_bridge_domain_show_one (gbp_bridge_domain_t *gb, void *ctx)
{
vlib_main_t *vm;
vm = ctx;
vlib_cli_output (vm, " %U",format_gbp_bridge_domain, gb);
vlib_cli_output (vm, " %U", format_gbp_bridge_domain_ptr, gb);
return (1);
}

View File

@ -77,6 +77,7 @@ extern int gbp_bridge_domain_add_and_lock (u32 bd_id,
extern void gbp_bridge_domain_unlock (index_t gbi);
extern index_t gbp_bridge_domain_find_and_lock (u32 bd_id);
extern int gbp_bridge_domain_delete (u32 bd_id);
extern index_t gbp_bridge_domain_index (const gbp_bridge_domain_t *);
typedef int (*gbp_bridge_domain_cb_t) (gbp_bridge_domain_t * gb, void *ctx);
extern void gbp_bridge_domain_walk (gbp_bridge_domain_cb_t bgpe, void *ctx);

View File

@ -154,7 +154,7 @@ gbp_classify_inline (vlib_main_t * vm,
}
if (PREDICT_TRUE (NULL != ge0))
src_epg = ge0->ge_epg_id;
src_epg = ge0->ge_fwd.gef_epg_id;
else
src_epg = EPG_INVALID;
}

File diff suppressed because it is too large Load Diff

View File

@ -37,24 +37,69 @@ typedef struct gbp_contract_key_t_
};
} gbp_contract_key_t;
/**
* The value for an Contract
*/
typedef struct gbp_contract_value_t_
typedef struct gbp_next_hop_t_
{
union
{
struct
{
/**
* lookup context and acl index
*/
u32 gc_lc_index;
u32 gc_acl_index;
};
u64 as_u64;
};
} gbp_contract_value_t;
fib_node_t gnh_node;
ip46_address_t gnh_ip;
mac_address_t gnh_mac;
index_t gnh_gu;
index_t gnh_bd;
index_t gnh_rd;
u32 gnh_ge;
u32 gnh_sibling;
index_t gnh_ai[FIB_PROTOCOL_IP_MAX];
} gbp_next_hop_t;
#define foreach_gbp_hash_mode \
_(SRC_IP, "src-ip") \
_(DST_IP, "dst-ip")
typedef enum gbp_hash_mode_t_
{
#define _(v,s) GBP_HASH_MODE_##v,
foreach_gbp_hash_mode
#undef _
} gbp_hash_mode_t;
#define foreach_gbp_rule_action \
_(PERMIT, "permit") \
_(DENY, "deny") \
_(REDIRECT, "redirect")
typedef enum gbp_rule_action_t_
{
#define _(v,s) GBP_RULE_##v,
foreach_gbp_rule_action
#undef _
} gbp_rule_action_t;
#define foreach_gbp_policy_node \
_(L2, "L2") \
_(IP4, "ip4") \
_(IP6, "ip6")
typedef enum gbp_policy_node_t_
{
#define _(v,s) GBP_POLICY_NODE_##v,
foreach_gbp_policy_node
#undef _
} gbp_policy_node_t;
#define GBP_POLICY_N_NODES (GBP_POLICY_NODE_IP6+1)
#define FOR_EACH_GBP_POLICY_NODE(pnode) \
for (pnode = GBP_POLICY_NODE_L2; pnode < GBP_POLICY_N_NODES; pnode++)
/**
 * A rule within a contract: what to do with traffic that matched the
 * contract's ACL.
 */
typedef struct gbp_rule_t_
{
/* the action to apply: permit, deny or redirect */
gbp_rule_action_t gu_action;
/* hash mode used to spread redirected flows over the next-hops */
gbp_hash_mode_t gu_hash_mode;
/* vector of next-hop indices; presumably only populated for redirect
* rules -- see the decode path */
index_t *gu_nhs;
/**
* DPO of the load-balance object used to redirect
*/
dpo_id_t gu_dpo[GBP_POLICY_N_NODES][FIB_PROTOCOL_IP_MAX];
} gbp_rule_t;
/**
* A Group Based Policy Contract.
@ -67,10 +112,13 @@ typedef struct gbp_contract_t_
*/
gbp_contract_key_t gc_key;
u32 gc_acl_index;
u32 gc_lc_index;
/**
* The ACL to apply for packets from the source to the destination EPG
*/
gbp_contract_value_t gc_value;
index_t *gc_rules;
} gbp_contract_t;
/**
@ -84,21 +132,29 @@ typedef struct gbp_contract_db_t_
uword *gc_hash;
} gbp_contract_db_t;
extern void gbp_contract_update (epg_id_t src_epg,
epg_id_t dst_epg, u32 acl_index);
extern void gbp_contract_delete (epg_id_t src_epg, epg_id_t dst_epg);
extern int gbp_contract_update (epg_id_t src_epg,
epg_id_t dst_epg,
u32 acl_index, index_t * rules);
extern int gbp_contract_delete (epg_id_t src_epg, epg_id_t dst_epg);
extern index_t gbp_rule_alloc (gbp_rule_action_t action,
gbp_hash_mode_t hash_mode, index_t * nhs);
extern index_t gbp_next_hop_alloc (const ip46_address_t * ip,
index_t grd,
const mac_address_t * mac, index_t gbd);
typedef int (*gbp_contract_cb_t) (gbp_contract_t * gbpe, void *ctx);
extern void gbp_contract_walk (gbp_contract_cb_t bgpe, void *ctx);
extern u8 *format_gbp_contract (u8 * s, va_list * args);
/**
* DP functions and databases
*/
extern gbp_contract_db_t gbp_contract_db;
always_inline u64
gbp_acl_lookup (gbp_contract_key_t * key)
always_inline index_t
gbp_contract_find (gbp_contract_key_t * key)
{
uword *p;
@ -107,7 +163,23 @@ gbp_acl_lookup (gbp_contract_key_t * key)
if (NULL != p)
return (p[0]);
return (~0);
return (INDEX_INVALID);
}
extern gbp_contract_t *gbp_contract_pool;
always_inline gbp_contract_t *
gbp_contract_get (index_t gci)
{
return (pool_elt_at_index (gbp_contract_pool, gci));
}
extern gbp_rule_t *gbp_rule_pool;
always_inline gbp_rule_t *
gbp_rule_get (index_t gui)
{
return (pool_elt_at_index (gbp_rule_pool, gui));
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -53,6 +53,121 @@ typedef enum gbp_endpoint_flags_t_
extern u8 *format_gbp_endpoint_flags (u8 * s, va_list * args);
/**
* Sources of Endpoints in priority order. The best (lowest value) source
* provides the forwarding information
*/
#define foreach_gbp_endpoint_src \
_(CP, "control-plane") \
_(DP, "data-plane") \
_(RR, "recursive-resolution")
typedef enum gbp_endpoint_src_t_
{
#define _(v,s) GBP_ENDPOINT_SRC_##v,
foreach_gbp_endpoint_src
#undef _
} gbp_endpoint_src_t;
#define GBP_ENDPOINT_SRC_MAX (GBP_ENDPOINT_SRC_RR+1)
extern u8 *format_gbp_endpoint_src (u8 * s, va_list * args);
/**
* This is the identity of an endpoint, as such it is information
* about an endpoint that is idempotent.
* The ID is used to add the EP into the various data-bases for retrieval.
*/
typedef struct gbp_endpoint_key_t_
{
/**
* A vector of ip addresses that belong to the endpoint.
* Together with the route EPG's RD this forms the EP's L3 key
*/
fib_prefix_t *gek_ips;
/**
* MAC address of the endpoint.
* Together with the route EPG's BD this forms the EP's L2 key
*/
mac_address_t gek_mac;
/**
* Index of the Bridge-Domain
*/
index_t gek_gbd;
/**
* Index of the Route-Domain
*/
index_t gek_grd;
} gbp_endpoint_key_t;
/**
* Information about the location of the endpoint provided by a source
* of endpoints
*/
typedef struct gbp_endpoint_loc_t_
{
/**
* The source providing this location information
*/
gbp_endpoint_src_t gel_src;
/**
* The interface on which the EP is connected
*/
u32 gel_sw_if_index;
/**
* Endpoint flags
*/
gbp_endpoint_flags_t gel_flags;
/**
* Endpoint Group.
*/
index_t gel_epg;
/**
* number of times this source has locked this
*/
u32 gel_locks;
/**
* Tunnel info for remote endpoints
*/
struct
{
u32 gel_parent_sw_if_index;
ip46_address_t gel_src;
ip46_address_t gel_dst;
} tun;
} gbp_endpoint_loc_t;
/**
* And endpoints current forwarding state
*/
typedef struct gbp_endpoint_fwd_t_
{
/**
* The interface on which the EP is connected
*/
index_t gef_itf;
/**
* The L3 adj, if created
*/
index_t *gef_adjs;
/**
* Endpoint Group's ID. cached for fast DP access.
*/
epg_id_t gef_epg_id;
gbp_endpoint_flags_t gef_flags;
} gbp_endpoint_fwd_t;
/**
* A Group Based Policy Endpoint.
* This is typically a VM or container. If the endpoint is local (i.e. on
@ -64,55 +179,27 @@ extern u8 *format_gbp_endpoint_flags (u8 * s, va_list * args);
typedef struct gbp_endpoint_t_
{
/**
* The interface on which the EP is connected
* A FIB node that allows the tracking of children.
*/
index_t ge_itf;
u32 ge_sw_if_index;
fib_node_t ge_node;
/**
* A vector of ip addresses that below to the endpoint
* The key/ID of this EP
*/
const ip46_address_t *ge_ips;
gbp_endpoint_key_t ge_key;
/**
* MAC address of the endpoint
* Location information provided by the various sources.
* These are sorted based on source priority.
*/
mac_address_t ge_mac;
gbp_endpoint_loc_t *ge_locs;
/**
* Index of the Endpoint's Group
*/
index_t ge_epg;
/**
* Endpoint Group's ID
*/
index_t ge_epg_id;
/**
* Endpoint flags
*/
gbp_endpoint_flags_t ge_flags;
/**
* The L3 adj, if created
*/
index_t *ge_adjs;
gbp_endpoint_fwd_t ge_fwd;
/**
* The last time a packet from seen from this end point
*/
f64 ge_last_time;
/**
* Tunnel info for remote endpoints
*/
struct
{
u32 ge_parent_sw_if_index;
ip46_address_t ge_src;
ip46_address_t ge_dst;
} tun;
} gbp_endpoint_t;
extern u8 *format_gbp_endpoint (u8 * s, va_list * args);
@ -127,22 +214,31 @@ typedef struct gbp_ep_by_ip_itf_db_t_
clib_bihash_16_8_t ged_by_mac_bd;
} gbp_ep_db_t;
extern int gbp_endpoint_update (u32 sw_if_index,
const ip46_address_t * ip,
const mac_address_t * mac,
epg_id_t epg_id,
gbp_endpoint_flags_t flags,
const ip46_address_t * tun_src,
const ip46_address_t * tun_dst, u32 * handle);
extern void gbp_endpoint_delete (index_t gbpei);
extern int gbp_endpoint_update_and_lock (gbp_endpoint_src_t src,
u32 sw_if_index,
const ip46_address_t * ip,
const mac_address_t * mac,
index_t gbd, index_t grd,
epg_id_t epg_id,
gbp_endpoint_flags_t flags,
const ip46_address_t * tun_src,
const ip46_address_t * tun_dst,
u32 * handle);
extern void gbp_endpoint_unlock (gbp_endpoint_src_t src, index_t gbpei);
extern u32 gbp_endpoint_child_add (index_t gei,
fib_node_type_t type,
fib_node_index_t index);
extern void gbp_endpoint_child_remove (index_t gei, u32 sibling);
typedef walk_rc_t (*gbp_endpoint_cb_t) (index_t gbpei, void *ctx);
extern void gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx);
extern void gbp_endpoint_scan (vlib_main_t * vm);
extern f64 gbp_endpoint_scan_threshold (void);
extern int gbp_endpoint_is_remote (const gbp_endpoint_t * ge);
extern int gbp_endpoint_is_learnt (const gbp_endpoint_t * ge);
extern void gbp_endpoint_flush (u32 sw_if_index);
extern void gbp_endpoint_flush (gbp_endpoint_src_t src, u32 sw_if_index);
/**
* DP functions and databases

View File

@ -44,7 +44,7 @@ gbp_endpoint_group_get (index_t i)
return (pool_elt_at_index (gbp_endpoint_group_pool, i));
}
static void
void
gbp_endpoint_group_lock (index_t i)
{
gbp_endpoint_group_t *gg;
@ -66,21 +66,6 @@ gbp_endpoint_group_find (epg_id_t epg_id)
return (INDEX_INVALID);
}
index_t
gbp_endpoint_group_find_and_lock (epg_id_t epg_id)
{
uword *p;
p = hash_get (gbp_endpoint_group_db.gg_hash, epg_id);
if (NULL != p)
{
gbp_endpoint_group_lock (p[0]);
return p[0];
}
return (INDEX_INVALID);
}
int
gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
u32 bd_id, u32 rd_id, u32 uplink_sw_if_index)
@ -165,6 +150,9 @@ gbp_endpoint_group_unlock (index_t ggi)
{
gbp_endpoint_group_t *gg;
if (INDEX_INVALID == ggi)
return;
gg = gbp_endpoint_group_get (ggi);
gg->gg_locks--;
@ -227,7 +215,7 @@ gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t * gg)
}
index_t
gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
gbp_endpoint_group_get_fib_index (const gbp_endpoint_group_t * gg,
fib_protocol_t fproto)
{
const gbp_route_domain_t *grd;
@ -237,16 +225,6 @@ gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
return (grd->grd_fib_index[fproto]);
}
u32
gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg)
{
const gbp_bridge_domain_t *gb;
gb = gbp_bridge_domain_get (gg->gg_gbd);
return (gb->gb_bvi_sw_if_index);
}
void
gbp_endpoint_group_walk (gbp_endpoint_group_cb_t cb, void *ctx)
{

View File

@ -74,16 +74,15 @@ extern int gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
u32 bd_id,
u32 rd_id,
u32 uplink_sw_if_index);
extern index_t gbp_endpoint_group_find_and_lock (epg_id_t epg_id);
extern index_t gbp_endpoint_group_find (epg_id_t epg_id);
extern int gbp_endpoint_group_delete (epg_id_t epg_id);
extern void gbp_endpoint_group_unlock (index_t index);
extern void gbp_endpoint_group_lock (index_t index);
extern u32 gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t *);
extern gbp_endpoint_group_t *gbp_endpoint_group_get (index_t i);
extern index_t gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
fib_protocol_t fproto);
extern u32 gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg);
extern index_t gbp_endpoint_group_get_fib_index (const gbp_endpoint_group_t *
gg, fib_protocol_t fproto);
typedef int (*gbp_endpoint_group_cb_t) (gbp_endpoint_group_t * gbpe,
void *ctx);

View File

@ -110,11 +110,14 @@ gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
* flip the source and dst, since that's how it was received, this API
* takes how it's sent
*/
gbp_endpoint_update (gl2->sw_if_index, ips,
&gl2->mac, gl2->epg,
(GBP_ENDPOINT_FLAG_LEARNT |
GBP_ENDPOINT_FLAG_REMOTE),
&gl2->outer_dst, &gl2->outer_src, NULL);
gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
gl2->sw_if_index, ips,
&gl2->mac, INDEX_INVALID,
INDEX_INVALID, gl2->epg,
(GBP_ENDPOINT_FLAG_LEARNT |
GBP_ENDPOINT_FLAG_REMOTE),
&gl2->outer_dst, &gl2->outer_src, NULL);
vec_free (ips);
}
static void
@ -273,7 +276,7 @@ gbp_learn_l2 (vlib_main_t * vm,
/*
* check for new EP or a moved EP
*/
if (NULL == ge0 || ge0->ge_sw_if_index != sw_if_index0)
if (NULL == ge0 || ge0->ge_fwd.gef_itf != sw_if_index0)
{
/*
@ -415,10 +418,13 @@ gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
vec_add1 (ips, gl3->ip);
gbp_endpoint_update (gl3->sw_if_index, ips, NULL, gl3->epg,
(GBP_ENDPOINT_FLAG_REMOTE |
GBP_ENDPOINT_FLAG_LEARNT),
&gl3->outer_dst, &gl3->outer_src, NULL);
gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
gl3->sw_if_index, ips, NULL,
INDEX_INVALID, INDEX_INVALID, gl3->epg,
(GBP_ENDPOINT_FLAG_REMOTE |
GBP_ENDPOINT_FLAG_LEARNT),
&gl3->outer_dst, &gl3->outer_src, NULL);
vec_free (ips);
}
static void

View File

@ -14,6 +14,7 @@
*/
#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_policy_dpo.h>
#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
@ -67,6 +68,44 @@ typedef struct gbp_policy_trace_t_
u32 allowed;
} gbp_policy_trace_t;
/**
 * Map an ethernet ethertype (network byte order) to the DPO protocol
 * used to index the redirect DPOs.
 *
 * @return DPO_PROTO_IP4/IP6 for IP ethertypes, DPO_PROTO_NONE otherwise
 */
always_inline dpo_proto_t
ethertype_to_dpo_proto (u16 etype)
{
  const u16 host_etype = clib_net_to_host_u16 (etype);

  if (ETHERNET_TYPE_IP4 == host_etype)
    return (DPO_PROTO_IP4);
  if (ETHERNET_TYPE_IP6 == host_etype)
    return (DPO_PROTO_IP6);

  return (DPO_PROTO_NONE);
}
/**
 * Redirect an L2 frame via the rule's load-balance DPO: pop the ethernet
 * header and hand the buffer to the DPO selected by the frame's ethertype.
 *
 * @return the next-node index taken from the chosen DPO
 */
always_inline u32
gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
{
const ethernet_header_t *eth0;
const dpo_id_t *dpo;
dpo_proto_t dproto;
eth0 = vlib_buffer_get_current (b0);
/* pop the ethernet header to prepare for L3 rewrite */
vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
/* eth0 still points at the now-popped header: the advance moves the
* buffer's current-data offset, not the data itself, so the read below
* remains valid */
dproto = ethertype_to_dpo_proto (eth0->type);
/* NOTE(review): a non-IP ethertype yields DPO_PROTO_NONE, which would
* index gu_dpo beyond its FIB_PROTOCOL_IP_MAX second dimension --
* verify callers only redirect IP frames */
dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
/* save the LB index for the next node and reset the IP flow hash
* so it's recalculated */
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
vnet_buffer (b0)->ip.flow_hash = 0;
return (dpo->dpoi_next_node);
}
static uword
gbp_policy_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
@ -93,12 +132,12 @@ gbp_policy_inline (vlib_main_t * vm,
const gbp_endpoint_t *ge0;
gbp_policy_next_t next0;
gbp_contract_key_t key0;
gbp_contract_value_t value0 = {
.as_u64 = ~0,
};
gbp_contract_t *gc0;
u32 bi0, sw_if_index0;
vlib_buffer_t *b0;
index_t gci0;
gc0 = NULL;
next0 = GBP_POLICY_NEXT_DENY;
bi0 = from[0];
to_next[0] = bi0;
@ -136,7 +175,7 @@ gbp_policy_inline (vlib_main_t * vm,
vnet_buffer (b0)->l2.bd_index);
if (NULL != ge0)
key0.gck_dst = ge0->ge_epg_id;
key0.gck_dst = ge0->ge_fwd.gef_epg_id;
else
/* If you cannot determine the destination EP then drop */
goto trace;
@ -161,9 +200,9 @@ gbp_policy_inline (vlib_main_t * vm,
}
else
{
value0.as_u64 = gbp_acl_lookup (&key0);
gci0 = gbp_contract_find (&key0);
if (~0 != value0.gc_lc_index)
if (INDEX_INVALID != gci0)
{
fa_5tuple_opaque_t pkt_5tuple0;
u8 action0 = 0;
@ -173,6 +212,7 @@ gbp_policy_inline (vlib_main_t * vm,
u16 ether_type0;
u8 is_ip60 = 0;
gc0 = gbp_contract_get (gci0);
l2_len0 = vnet_buffer (b0)->l2.l2_len;
h0 = vlib_buffer_get_current (b0);
@ -185,14 +225,14 @@ gbp_policy_inline (vlib_main_t * vm,
*/
acl_plugin_fill_5tuple_inline (gm->
acl_plugin.p_acl_main,
value0.gc_lc_index, b0,
gc0->gc_lc_index, b0,
is_ip60,
/* is_input */ 0,
/* is_l2_path */ 1,
&pkt_5tuple0);
acl_plugin_match_5tuple_inline (gm->
acl_plugin.p_acl_main,
value0.gc_lc_index,
gc0->gc_lc_index,
&pkt_5tuple0, is_ip60,
&action0, &acl_pos_p0,
&acl_match_p0,
@ -201,17 +241,30 @@ gbp_policy_inline (vlib_main_t * vm,
if (action0 > 0)
{
vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
gbp_rule_t *gu;
next0 =
vnet_l2_feature_next (b0,
gpm->l2_output_feat_next
[is_port_based],
(is_port_based ?
L2OUTPUT_FEAT_GBP_POLICY_PORT
:
L2OUTPUT_FEAT_GBP_POLICY_MAC));
;
vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
gu = gbp_rule_get (gc0->gc_rules[rule_match_p0]);
switch (gu->gu_action)
{
case GBP_RULE_PERMIT:
next0 = vnet_l2_feature_next
(b0,
gpm->l2_output_feat_next
[is_port_based],
(is_port_based ?
L2OUTPUT_FEAT_GBP_POLICY_PORT :
L2OUTPUT_FEAT_GBP_POLICY_MAC));
break;
case GBP_RULE_DENY:
ASSERT (0);
next0 = 0;
break;
case GBP_RULE_REDIRECT:
next0 = gbp_rule_l2_redirect (gu, b0);
break;
}
}
}
}
@ -237,8 +290,8 @@ gbp_policy_inline (vlib_main_t * vm,
vlib_add_trace (vm, node, b0, sizeof (*t));
t->src_epg = key0.gck_src;
t->dst_epg = key0.gck_dst;
t->acl_index = value0.gc_acl_index;
t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
t->acl_index = (gc0 ? gc0->gc_acl_index : ~0),
t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
}
/* verify speculative enqueue, maybe switch current next frame */
@ -308,15 +361,7 @@ VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
.vector_size = sizeof (u32),
.format_trace = format_gbp_policy_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN(gbp_policy_error_strings),
.error_strings = gbp_policy_error_strings,
.n_next_nodes = GBP_POLICY_N_NEXT,
.next_nodes = {
[GBP_POLICY_NEXT_DENY] = "error-drop",
},
.sibling_of = "gbp-policy-port",
};
VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_mac_node, gbp_policy_mac);

View File

@ -236,6 +236,23 @@ typedef enum
GBP_POLICY_N_NEXT,
} gbp_policy_next_t;
/**
 * Redirect an IP packet via the rule's per-address-family load-balance DPO.
 *
 * @param gu     the matched redirect rule
 * @param b0     the buffer to redirect
 * @param is_ip6 non-zero for IPv6, zero for IPv4
 * @return the next-node index taken from the chosen DPO
 */
always_inline u32
gbp_rule_l3_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0, int is_ip6)
{
gbp_policy_node_t pnode;
const dpo_id_t *dpo;
dpo_proto_t dproto;
/* select the policy node and DPO protocol matching the packet's af */
pnode = (is_ip6 ? GBP_POLICY_NODE_IP6 : GBP_POLICY_NODE_IP4);
dproto = (is_ip6 ? DPO_PROTO_IP6 : DPO_PROTO_IP4);
dpo = &gu->gu_dpo[pnode][dproto];
/* The flow hash is still valid as this is a IP packet being switched */
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
return (dpo->dpoi_next_node);
}
always_inline uword
gbp_policy_dpo_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
@ -243,6 +260,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
{
gbp_main_t *gm = &gbp_main;
u32 n_left_from, next_index, *from, *to_next;
gbp_rule_t *gu;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
@ -260,10 +278,9 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
const gbp_policy_dpo_t *gpd0;
u32 bi0, next0;
gbp_contract_key_t key0;
gbp_contract_value_t value0 = {
.as_u64 = ~0,
};
gbp_contract_t *gc0;
vlib_buffer_t *b0;
index_t gci0;
bi0 = from[0];
to_next[0] = bi0;
@ -275,6 +292,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
gc0 = NULL;
gpd0 =
gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
@ -301,9 +319,9 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
}
else
{
value0.as_u64 = gbp_acl_lookup (&key0);
gci0 = gbp_contract_find (&key0);
if (~0 != value0.gc_lc_index)
if (INDEX_INVALID != gci0)
{
fa_5tuple_opaque_t pkt_5tuple0;
u8 action0 = 0;
@ -312,16 +330,17 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
/*
* tests against the ACL
*/
gc0 = gbp_contract_get (gci0);
acl_plugin_fill_5tuple_inline (gm->
acl_plugin.p_acl_main,
value0.gc_lc_index, b0,
gc0->gc_lc_index, b0,
is_ip6,
/* is_input */ 1,
/* is_l2_path */ 0,
&pkt_5tuple0);
acl_plugin_match_5tuple_inline (gm->
acl_plugin.p_acl_main,
value0.gc_lc_index,
gc0->gc_lc_index,
&pkt_5tuple0, is_ip6,
&action0, &acl_pos_p0,
&acl_match_p0,
@ -330,8 +349,23 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
if (action0 > 0)
{
vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
next0 = gpd0->gpd_dpo.dpoi_next_node;
gu = gbp_rule_get (gc0->gc_rules[rule_match_p0]);
switch (gu->gu_action)
{
case GBP_RULE_PERMIT:
next0 = gpd0->gpd_dpo.dpoi_next_node;
break;
case GBP_RULE_DENY:
ASSERT (0);
next0 = 0;
break;
case GBP_RULE_REDIRECT:
next0 = gbp_rule_l3_redirect (gu, b0, is_ip6);
break;
}
}
}
}
@ -352,7 +386,7 @@ gbp_policy_dpo_inline (vlib_main_t * vm,
tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->src_epg = key0.gck_src;
tr->dst_epg = key0.gck_dst;
tr->acl_index = value0.gc_acl_index;
tr->acl_index = (gc0 ? gc0->gc_acl_index : ~0);
tr->a_bit = vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A;
}

View File

@ -59,6 +59,10 @@ extern gbp_policy_dpo_t *gbp_policy_dpo_get (index_t index);
extern dpo_type_t gbp_policy_dpo_get_type (void);
extern vlib_node_registration_t ip4_gbp_policy_dpo_node;
extern vlib_node_registration_t ip6_gbp_policy_dpo_node;
extern vlib_node_registration_t gbp_policy_port_node;
/*
* fd.io coding-style-patch-verification: ON
*

View File

@ -66,11 +66,12 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
fib_protocol_t fproto;
index_t ggi;
ggi = gbp_endpoint_group_find_and_lock (epg_id);
ggi = gbp_endpoint_group_find (epg_id);
if (INDEX_INVALID == ggi)
return (VNET_API_ERROR_NO_SUCH_ENTRY);
gbp_endpoint_group_lock (ggi);
pool_get (gbp_recirc_pool, gr);
clib_memset (gr, 0, sizeof (*gr));
gri = gr - gbp_recirc_pool;
@ -119,10 +120,12 @@ gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
mac_address_from_bytes (&mac,
vnet_sw_interface_get_hw_address
(vnet_get_main (), gr->gr_sw_if_index));
gbp_endpoint_update (gr->gr_sw_if_index,
NULL, &mac, gr->gr_epg,
GBP_ENDPOINT_FLAG_NONE,
NULL, NULL, &gr->gr_ep);
gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_CP,
gr->gr_sw_if_index,
NULL, &mac, INDEX_INVALID,
INDEX_INVALID, gr->gr_epg,
GBP_ENDPOINT_FLAG_NONE,
NULL, NULL, &gr->gr_ep);
vnet_feature_enable_disable ("ip4-unicast",
"ip4-gbp-src-classify",
gr->gr_sw_if_index, 1, 0, 0);
@ -172,7 +175,7 @@ gbp_recirc_delete (u32 sw_if_index)
if (gr->gr_is_ext)
{
gbp_endpoint_delete (gr->gr_ep);
gbp_endpoint_unlock (GBP_ENDPOINT_SRC_CP, gr->gr_ep);
vnet_feature_enable_disable ("ip4-unicast",
"ip4-gbp-src-classify",
gr->gr_sw_if_index, 0, 0, 0);

View File

@ -62,6 +62,12 @@ vlib_log_class_t grd_logger;
#define GBP_BD_DBG(...) \
vlib_log_debug (grd_logger, __VA_ARGS__);
index_t
gbp_route_domain_index (const gbp_route_domain_t * grd)
{
return (grd - gbp_route_domain_pool);
}
gbp_route_domain_t *
gbp_route_domain_get (index_t i)
{

View File

@ -62,6 +62,7 @@ extern int gbp_route_domain_add_and_lock (u32 rd_id,
extern void gbp_route_domain_unlock (index_t grdi);
extern index_t gbp_route_domain_find_and_lock (u32 rd_id);
extern index_t gbp_route_domain_find (u32 rd_id);
extern index_t gbp_route_domain_index (const gbp_route_domain_t *);
extern int gbp_route_domain_delete (u32 rd_id);
extern gbp_route_domain_t *gbp_route_domain_get (index_t i);

View File

@ -781,7 +781,7 @@ gbp_vxlan_tunnel_del (u32 vni)
GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
gt - gbp_vxlan_tunnel_pool);
gbp_endpoint_flush (gt->gt_sw_if_index);
gbp_endpoint_flush (GBP_ENDPOINT_SRC_DP, gt->gt_sw_if_index);
ASSERT (0 == vec_len (gt->gt_tuns));
vec_free (gt->gt_tuns);

View File

@ -265,26 +265,24 @@ adj_nbr_add_or_lock_w_rewrite (fib_protocol_t nh_proto,
u8 *rewrite)
{
adj_index_t adj_index;
ip_adjacency_t *adj;
adj_index = adj_nbr_find(nh_proto, link_type, nh_addr, sw_if_index);
if (ADJ_INDEX_INVALID == adj_index)
{
adj = adj_nbr_alloc(nh_proto, link_type, nh_addr, sw_if_index);
ip_adjacency_t *adj;
adj = adj_nbr_alloc(nh_proto, link_type, nh_addr, sw_if_index);
adj->rewrite_header.sw_if_index = sw_if_index;
}
else
{
adj = adj_get(adj_index);
adj_index = adj_get_index(adj);
}
adj_lock(adj_get_index(adj));
adj_nbr_update_rewrite(adj_get_index(adj),
adj_lock(adj_index);
adj_nbr_update_rewrite(adj_index,
ADJ_NBR_REWRITE_FLAG_COMPLETE,
rewrite);
return (adj_get_index(adj));
return (adj_index);
}
/**

File diff suppressed because it is too large Load Diff

View File

@ -3631,14 +3631,16 @@ class VppPapiProvider(object):
""" GBP Subnet Dump """
return self.api(self.papi.gbp_subnet_dump, {})
def gbp_contract_add_del(self, is_add, src_epg, dst_epg, acl_index):
def gbp_contract_add_del(self, is_add, src_epg, dst_epg, acl_index, rules):
""" GBP contract Add/Del """
return self.api(self.papi.gbp_contract_add_del,
{'is_add': is_add,
'contract': {
'acl_index': acl_index,
'src_epg': src_epg,
'dst_epg': dst_epg}})
'dst_epg': dst_epg,
'n_rules': len(rules),
'rules': rules}})
def gbp_contract_dump(self):
""" GBP contract Dump """