#define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP4) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP6) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

#define FLOW_IS_L3_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))

#define FLOW_IS_L4_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

#define FLOW_IS_L4_TUNNEL_TYPE(f)                                             \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))

int
avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
                         u32 in_len, void *out, u32 out_len)
{
  u32 dev_instance = *(u32 *) vc_hdl;
  avf_device_t *ad = avf_get_device (dev_instance);
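  /*
   * This callback is handed to the shared FDIR library as its transport
   * hook (struct avf_fdir_vc_ctx.vc_op, set up below in avf_flow_add):
   * the library calls it to push an encoded rule over the PF-VF virtual
   * channel.  Add and delete filter opcodes are forwarded to the PF via
   * avf_program_flow (); any other opcode is rejected and logged.
   */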
    default:
      avf_log_err (ad, "unsupported avf virtual channel opcode %u\n", vc_op);
      return -1;
    }
static int
avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
{
  avf_device_t *ad = avf_get_device (dev_instance);
  int rv = 0;
  int ret = 0;
  u16 src_port = 0, dst_port = 0;
  u16 src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  bool fate = false;
  struct avf_flow_error error;
  int layer = 0;
  int action_count = 0;

  struct avf_fdir_vc_ctx vc_ctx;
  struct avf_fdir_conf *filter;
  struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
  struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];
  struct avf_flow_action_queue act_q;
  struct avf_flow_action_mark act_msk;
  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  vc_ctx.vc_hdl = &dev_instance;
  vc_ctx.vc_op = avf_fdir_vc_op_callback;

  clib_memset (avf_actions, 0, sizeof (avf_actions));
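  /*
   * Pattern construction: one avf_flow_item per protocol layer, each
   * pointing at a spec/mask pair.  The outer L3 header is filled first;
   * an L4 or tunnel layer may be appended further below.
   */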
  avf_items[layer].spec = NULL;
  avf_items[layer].mask = NULL;
  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
      avf_items[layer].spec = &ip4_spec;
      avf_items[layer].mask = &ip4_mask;
      layer++;

      if ((!ip4_ptr->src_addr.mask.as_u32) &&
          (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
        ;
      else
        {
          ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
          ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;

          ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
          ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;

          ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
          ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

          src_port = ip4_n_ptr->src_port.port;
          dst_port = ip4_n_ptr->dst_port.port;
          src_port_mask = ip4_n_ptr->src_port.mask;
          dst_port_mask = ip4_n_ptr->dst_port.mask;
        }

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV6;
      avf_items[layer].spec = &ip6_spec;
      avf_items[layer].mask = &ip6_mask;
      layer++;

      if (ip6_address_is_zero (&ip6_ptr->src_addr.mask) &&
          ip6_address_is_zero (&ip6_ptr->dst_addr.mask) &&
          (!ip6_ptr->protocol.mask))
        ;
      else
        {
          clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
                       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
          clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
                       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
          clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
                       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
          clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
                       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
          ip6_spec.proto = ip6_ptr->protocol.prot;
          ip6_mask.proto = ip6_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

          src_port = ip6_n_ptr->src_port.port;
          dst_port = ip6_n_ptr->dst_port.port;
          src_port_mask = ip6_n_ptr->src_port.mask;
          dst_port_mask = ip6_n_ptr->dst_port.mask;
        }

      protocol = ip6_ptr->protocol.prot;
    }
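  /*
   * The inner protocol now selects the next pattern layer.  Plain L3
   * flows (VNET_FLOW_TYPE_IP4/IP6) carry no L4 match, so the protocol
   * switch below is skipped for them via FLOW_IS_L3_TYPE.
   */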
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_L2TPV3;
      avf_items[layer].spec = &l2tpv3_spec;
      avf_items[layer].mask = &l2tpv3_mask;
      layer++;

      vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
      l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
      l2tpv3_mask.session_id = ~0;
      break;
    case IP_PROTOCOL_IPSEC_ESP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_ESP;
      avf_items[layer].spec = &esp_spec;
      avf_items[layer].mask = &esp_mask;
      layer++;

      vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
      esp_spec.spi = clib_host_to_net_u32 (esph->spi);
      esp_mask.spi = ~0;
      break;
    case IP_PROTOCOL_IPSEC_AH:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_AH;
      avf_items[layer].spec = &ah_spec;
      avf_items[layer].mask = &ah_mask;
      layer++;

      vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
      ah_spec.spi = clib_host_to_net_u32 (ah->spi);
      ah_mask.spi = ~0;
      break;
    case IP_PROTOCOL_TCP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
      avf_items[layer].spec = &tcp_spec;
      avf_items[layer].mask = &tcp_mask;
      layer++;

      if (src_port_mask)
        {
          tcp_spec.src_port = clib_host_to_net_u16 (src_port);
          tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }
      break;
    case IP_PROTOCOL_UDP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
      avf_items[layer].spec = &udp_spec;
      avf_items[layer].mask = &udp_mask;
      layer++;

      if (src_port_mask)
        {
          udp_spec.src_port = clib_host_to_net_u16 (src_port);
          udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
        {
          avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
          avf_items[layer].spec = &gtp_spec;
          avf_items[layer].mask = &gtp_mask;
          layer++;

          vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
          gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
          gtp_mask.teid = ~0;
        }
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

pattern_end:
  ret = avf_fdir_parse_pattern (filter, avf_items, &error);
  if (ret)
    {
      avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
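  /*
   * Action construction: each requested vnet flow action becomes one
   * virtchnl action.  A rule carries exactly one "fate" action (queue,
   * drop, or passthru as the fallback); requesting a second fate is an
   * internal error.  Mark is stacked on top when requested.
   */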
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
      avf_actions[action_count].conf = &act_q;

      act_q.index = f->redirect_queue;
      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
      avf_actions[action_count].conf = NULL;

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      fate = true;
      action_count++;
    }

  if (fate == false)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
      avf_actions[action_count].conf = NULL;

      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
      avf_actions[action_count].conf = &act_msk;

      act_msk.id = fe->mark;
      action_count++;
    }
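  /*
   * Hand the assembled pattern and action lists to the shared FDIR
   * parser, then program the rule through the virtual channel context
   * initialized above.
   */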
  ret = avf_fdir_parse_action (avf_actions, filter, &error);
  if (ret)
    {
      avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  ret = avf_fdir_rule_create (&vc_ctx, filter);
  if (ret)
    {
      avf_log_err (ad, "avf fdir rule create failed: %s",
                   avf_fdir_prgm_error_decode (ret));
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->rcfg = filter;

done:
  return rv;
}
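/*
 * avf_flow_ops_fn is the vnet flow device-op entry point for AVF
 * interfaces: it adds and deletes hardware flows, provided the VF
 * advertises the FDIR offload capability.
 */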
int
avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
                 u32 flow_index, uword *private_data)
{
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  avf_flow_entry_t *fe = NULL;
  avf_flow_lookup_entry_t *fle = NULL;
  int rv = 0;

  avf_device_t *ad = avf_get_device (dev_instance);

  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
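  /*
   * Add path: allocate a flow entry and, when the flow carries a mark,
   * redirect-to-node, or buffer-advance action, also reserve a lookup
   * table slot whose pool index serves as the hardware mark value.
   */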
  if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
    {
      pool_get (ad->flow_entries, fe);
      fe->flow_index = flow->index;

      if (flow->actions &
          (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
           VNET_FLOW_ACTION_BUFFER_ADVANCE))
        {
          pool_get_aligned (ad->flow_lookup_entries, fle,
                            CLIB_CACHE_LINE_BYTES);
          fe->mark = fle - ad->flow_lookup_entries;

          clib_memset (fle, -1, sizeof (*fle));

          if (flow->actions & VNET_FLOW_ACTION_MARK)
            fle->flow_id = flow->mark_flow_id;
          if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
            fle->next_index = flow->redirect_device_input_next_index;
          if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
            fle->buffer_advance = flow->buffer_advance;

          if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
            {
              ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
            }
        }
      else
        fe->mark = 0;
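      /*
       * Only flow types the FDIR translation above understands are
       * accepted; anything else is rejected before a rule is built.
       */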
      switch (flow->type)
        {
        case VNET_FLOW_TYPE_IP4:
        case VNET_FLOW_TYPE_IP6:
        case VNET_FLOW_TYPE_IP4_N_TUPLE:
        case VNET_FLOW_TYPE_IP6_N_TUPLE:
        case VNET_FLOW_TYPE_IP4_GTPU:
        case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
        case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
        case VNET_FLOW_TYPE_IP4_IPSEC_AH:
          if ((rv = avf_flow_add (dev_instance, flow, fe)))
            goto done;
          break;
        default:
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      *private_data = fe - ad->flow_entries;
    }
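  /*
   * Delete path: look the entry up via the private_data cookie stored on
   * add, destroy the hardware rule over the same virtual channel
   * callback, then release the mark slot and the flow entry itself.
   */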
  else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (ad->flow_entries, *private_data);

      struct avf_fdir_vc_ctx ctx;
      ctx.vc_hdl = &dev_instance;
      ctx.vc_op = avf_fdir_vc_op_callback;

      rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
      if (rv)
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          pool_put_index (ad->flow_lookup_entries, fe->mark);
        }

      (void) avf_fdir_rcfg_destroy (fe->rcfg);
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (ad->flow_entries, fe);
      goto disable_rx_offload;
    }
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
disable_rx_offload:
  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
      pool_elts (ad->flow_entries) == 0)
    {
      ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
    }

  return rv;
}
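/*
 * Usage sketch (illustrative, not part of this file): a consumer builds
 * a vnet_flow_t and hands it to the generic vnet flow layer, which in
 * turn invokes avf_flow_ops_fn for an AVF interface.  Field and action
 * names follow vnet/flow/flow.h; hw_if_index is a placeholder for the
 * target interface.
 *
 *   vnet_flow_t flow = {
 *     .type = VNET_FLOW_TYPE_IP4_N_TUPLE,
 *     .actions = VNET_FLOW_ACTION_REDIRECT_TO_QUEUE,
 *     .redirect_queue = 1,
 *   };
 *   flow.ip4_n_tuple.protocol.prot = IP_PROTOCOL_UDP;
 *   flow.ip4_n_tuple.protocol.mask = 0xff;
 *   flow.ip4_n_tuple.dst_port.port = 4789;
 *   flow.ip4_n_tuple.dst_port.mask = 0xffff;
 *
 *   u32 flow_index;
 *   vnet_flow_add (vnet_get_main (), &flow, &flow_index);
 *   vnet_flow_enable (vnet_get_main (), flow_index, hw_if_index);
 */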