21 #include <sys/ioctl.h> 31 #define foreach_memif_tx_func_error \ 32 _(NO_FREE_SLOTS, "no free tx slots") \ 33 _(ROLLBACK, "no enough space in tx buffers") 37 #define _(f,s) MEMIF_TX_ERROR_##f, 49 #ifndef CLIB_MARCH_VARIANT 53 u32 dev_instance = va_arg (*args,
u32);
67 u32 dev_instance = va_arg (*args,
u32);
68 int verbose = va_arg (*args,
int);
71 s =
format (s,
"MEMIF interface");
83 s =
format (s,
"Unimplemented...");
89 u16 buffer_offset,
u16 buffer_vec_index)
109 u16 ring_size, mask, slot, free_slots;
114 void *last_region_shm = 0;
118 mask = ring_size - 1;
129 free_slots = ring->
head - ring->
tail;
131 while (n_left && free_slots)
136 u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
137 u32 saved_ptd_copy_ops_len = _vec_len (ptd->
copy_ops);
138 u32 saved_ptd_buffers_len = _vec_len (ptd->
buffers);
139 u16 saved_slot = slot;
144 d0 = &ring->
desc[slot & mask];
150 mb0 = last_region_shm + d0->
offset;
176 d0 = &ring->
desc[slot & mask];
187 mb0 = last_region_shm + d0->
offset;
192 _vec_len (ptd->
buffers) = saved_ptd_buffers_len;
193 _vec_len (ptd->
copy_ops) = saved_ptd_copy_ops_len;
195 MEMIF_TX_ERROR_ROLLBACK, 1);
200 bytes_to_copy =
clib_min (src_left, dst_left);
204 src_off += bytes_to_copy;
205 dst_off += bytes_to_copy;
206 src_left -= bytes_to_copy;
207 dst_left -= bytes_to_copy;
230 while (n_copy_op >= 8)
272 if (n_left && n_retries--)
304 u16 slot, free_slots, n_free;
306 u16 mask = ring_size - 1;
323 while (n_left && free_slots)
326 u16 slots_in_packet = 1;
340 d0 = &ring->
desc[s0];
357 free_slots += slots_in_packet;
358 slot -= slots_in_packet;
381 if (n_left && n_retries--)
425 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
427 else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
444 if (node_index == ~0)
486 mif->
flags |= MEMIF_IF_FLAG_ADMIN_UP;
488 mif->
flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
static_always_inline uword memif_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, memif_queue_t *mq, memif_per_thread_data_t *ptd)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
memif_socket_file_t * socket_files
#define CLIB_MEMORY_STORE_BARRIER()
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define clib_memcpy_fast(a, b, c)
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
static void memif_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u16 current_length
Number of bytes between current data and the end of this buffer.
uint16_t memif_region_index_t
VNET_DEVICE_CLASS_TX_FN() memif_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
vlib_main_t ** vlib_mains
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
u8 buffer_pool_index
index of buffer pool this buffer belongs.
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
#define vec_add1_aligned(V, E, A)
Add 1 element to end of vector (alignment specified).
vnet_hw_interface_rx_mode
#define static_always_inline
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u32 per_interface_next_index
#define VNET_DEVICE_CLASS_TX_FN(devclass)
static clib_error_t * memif_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
static void memif_clear_hw_interface_counters(u32 instance)
memif_region_index_t region
memif_copy_op_t * copy_ops
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
static char * memif_tx_func_error_strings[]
u32 node_index
Node index.
static u8 * format_memif_tx_trace(u8 *s, va_list *args)
#define foreach_memif_tx_func_error
#define CLIB_PREFETCH(addr, size, type)
static clib_error_t * memif_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
vl_api_vxlan_gbp_api_tunnel_mode_t mode
static clib_error_t * memif_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
#define MEMIF_DESC_FLAG_NEXT
struct memif_if_t::@508 run
VNET_DEVICE_CLASS(bond_dev_class)
#define MEMIF_RING_FLAG_MASK_INT
static vlib_main_t * vlib_get_main(void)
memif_region_offset_t offset
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
memif_log2_ring_size_t log2_ring_size
static u8 * format_memif_device(u8 *s, va_list *args)
memif_per_thread_data_t * per_thread_data
static_always_inline uword memif_interface_tx_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_queue_t *mq, memif_per_thread_data_t *ptd)
memif_queue_t * rx_queues
#define CLIB_CACHE_LINE_BYTES
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
u8 * format_memif_device_name(u8 *s, va_list *args)