#include <rte_config.h>
#include <rte_ethdev.h>

STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

#ifndef CLIB_MARCH_VARIANT

clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
  struct rte_mempool *mp, *nmp;
  struct rte_pktmbuf_pool_private priv;
  enum rte_iova_mode iova_mode;
  u32 i;
  u8 *name = 0;
  u32 elt_size =
    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;

  /* normal mempool with a per-lcore object cache of 512 elements */
  name = format (name, "vpp pool %u%c", bp->index, 0);
  mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				 elt_size, 512, sizeof (priv),
				 bp->numa_node, 0);
  if (!mp)
    {
      vec_free (name);
      return clib_error_return (0,
				"failed to create normal mempool for numa node %u",
				bp->numa_node);
    }

  /* second, non-cached mempool sharing the same objects */
  vec_reset_length (name);
  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
  nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				  elt_size, 0, sizeof (priv),
				  bp->numa_node, 0);
  if (!nmp)
    {
      rte_mempool_free (mp);
      vec_free (name);
      return clib_error_return (0,
				"failed to create non-cache mempool for numa node %u",
				bp->numa_node);
    }
  vec_free (name);
  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;

  /* both mempools map back to the same VPP buffer pool */
  mp->pool_id = nmp->pool_id = bp->index;

  rte_mempool_set_ops_byname (mp, "vpp", NULL);
  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);

  /* initialize the pktmbuf private area of both mempools
     (the code filling priv is not preserved in this excerpt) */
  rte_pktmbuf_pool_init (mp, &priv);
  rte_pktmbuf_pool_init (nmp, &priv);
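A minimal sketch of the missing priv setup, assuming the conventional values (headroom plus the pool's default data size for the data room, and the vlib_buffer_t metadata size for the mbuf private area):

  /* sketch only -- assumed values for the private area consumed by
     rte_pktmbuf_pool_init () */
  memset (&priv, 0, sizeof (priv));
  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
    vlib_buffer_get_default_data_size (vm);
  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;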
  iova_mode = rte_eal_iova_mode ();

  /* populate the mempool element list with the VPP buffers */
  for (i = 0; i < vec_len (bp->buffers); i++)
    {
      struct rte_mempool_objhdr *hdr;
      vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]);
      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
      hdr->mp = mp;
      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
	pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
      mp->populated_size++;
      nmp->populated_size++;
    }

  /* run the pktmbuf initializer on every element */
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);

  /* save an mbuf header template for this pool, taken from the first buffer */
  clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
	       rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
					  (buffer_mem_start, *bp->buffers, 0)),
	       sizeof (struct rte_mbuf));

  /* register the backing pages with DPDK if any ethernet device is present */
  if (rte_eth_dev_count_avail ())
    {
      vlib_physmem_map_t *pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
      uword page_sz = 1ULL << pm->log2_page_size;

      for (i = 0; i < pm->n_pages; i++)
	{
	  char *va = ((char *) pm->base) + i * page_sz;
	  uword pa = (iova_mode == RTE_IOVA_VA) ?
	    pointer_to_uword (va) : pm->page_table[i];
	  struct rte_mempool_memhdr *memhdr = clib_mem_alloc (sizeof (*memhdr));

	  memhdr->mp = mp;
	  memhdr->addr = va;
	  memhdr->iova = pa;
	  memhdr->len = page_sz;
	  memhdr->free_cb = 0;
	  STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
	}
    }
  return 0;
}
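For contrast, the stock DPDK way to attach a memory chunk to a mempool is rte_mempool_populate_iova(); it would also carve objects out of the chunk, which is why the loop above instead chains rte_mempool_memhdr entries by hand around objects that already live in VPP buffer memory. A sketch of the stock call for a single page, reusing va, pa and page_sz from the loop above:

  /* sketch only -- not what the code above does */
  if (rte_mempool_populate_iova (mp, va, (rte_iova_t) pa, page_sz,
				 NULL /* free_cb */, NULL /* opaque */) < 0)
    clib_warning ("rte_mempool_populate_iova failed");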
static_always_inline void
dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
{
  /* only non-replicated packets (ref_count == 1) are expected here */
  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  vlib_buffer_copy_template (b, bt);
}

int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
					  void *const *obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  u8 buffer_pool_index = mp->pool_id;
  u32 bufs[batch_size];
  void *const *obj = obj_table;

  /* ... walk obj[] resetting each buffer's metadata from the pool template
     (dpdk_ops_vpp_enqueue_one), then translate the mbuf pointers to buffer
     indices and return them to the VPP pool in batches ... */
  while (n >= batch_size)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
					   batch_size,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
      n -= batch_size;
      obj_table += batch_size;
    }
  if (n)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs, n,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
    }
  return 0;
}

static_always_inline void
dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
				   struct rte_mempool *new, void *obj,
				   vlib_buffer_t * bt)
{
  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  /* ... return b to its native pool once its ref_count drops to zero ... */
}

int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
						   void *const *obj_table,
						   unsigned n)
{
  struct rte_mempool *mp;
  u8 buffer_pool_index = cmp->pool_id;
  /* ... per-object dpdk_ops_vpp_enqueue_no_cache_one (), in batches ... */
  return 0;
}
/* dpdk_mbuf_init_from_template (mba, mt, count): copy the per-pool mbuf
   template into each freshly allocated mbuf, preserving buf_addr/buf_iova */
  while (count--)
    {
      struct rte_mbuf *mb = mba[0];
      int i;

      /* bytes 16..31 */
      *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);
      /* bytes 32..127 */
#ifdef CLIB_HAVE_VEC256
      for (i = 1; i < 4; i++)
	*((u8x32 *) mb + i) = *((u8x32 *) mt + i);
#else
      for (i = 2; i < 8; i++)
	*((u8x16 *) mb + i) = *((u8x16 *) mt + i);
#endif
      mba++;
    }
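A scalar equivalent of the vector copies above, to make the byte ranges explicit (sketch only; the helper name is made up here, and the offsets assume the 128-byte struct rte_mbuf asserted elsewhere in this file):

/* hypothetical scalar version: leave bytes 0..15 (buf_addr, buf_iova)
   untouched, overwrite bytes 16..127 from the per-pool template */
static_always_inline void
dpdk_mbuf_init_from_template_scalar (struct rte_mbuf **mba,
				     struct rte_mbuf *mt, int count)
{
  while (count--)
    {
      clib_memcpy ((u8 *) mba[0] + 16, (u8 *) mt + 16,
		   sizeof (struct rte_mbuf) - 16);
      mba++;
    }
}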
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
					  void **obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  u32 bufs[batch_size], total = 0, n_alloc = 0;
  u8 buffer_pool_index = mp->pool_id;
  void **obj = obj_table;

  while (n >= batch_size)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size,
					     buffer_pool_index);
      if (n_alloc != batch_size)
	goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
				    -(i32) sizeof (struct rte_mbuf));
      /* ... initialize each mbuf header from the pool template ... */
      total += batch_size;
      obj += batch_size;
      n -= batch_size;
    }
  if (n)
    {
      /* ... allocate and translate the remaining n objects, again with a
	 -(i32) sizeof (struct rte_mbuf) pointer offset ... */
    }
  return 0;

alloc_fail:
  /* ... roll back: free the bufs[] just allocated and convert the already
     dequeued pointers back to indices with a +sizeof (struct rte_mbuf)
     offset so they can be returned to the pool ... */
  return -ENOENT;
}
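The +/- sizeof (struct rte_mbuf) offsets used by the enqueue and dequeue callbacks rely on each pool element being an rte_mbuf immediately followed by its vlib_buffer_t, matching the elt_size computed in dpdk_buffer_pool_init (). A sketch of the two conversion macros under that assumed layout (their real definitions are not shown in this listing):

/* assumed per-element layout:
 *   | rte_mempool_objhdr | struct rte_mbuf | vlib_buffer_t (incl. headroom) | packet data |
 * so converting between the two views is pure pointer arithmetic */
#define rte_mbuf_from_vlib_buffer(x)  (((struct rte_mbuf *) (x)) - 1)
#define vlib_buffer_from_rte_mbuf(x)  ((vlib_buffer_t *) (((struct rte_mbuf *) (x)) + 1))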
#ifndef CLIB_MARCH_VARIANT
static unsigned
dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
  struct rte_mempool *cmp;
  cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id];
  return dpdk_ops_vpp_get_count (cmp);
}

clib_error_t *
dpdk_buffer_pools_create (vlib_main_t * vm)
{
  struct rte_mempool_ops ops = { };

  strncpy (ops.name, "vpp", 4);
  /* ... fill in ops.alloc, .free, .enqueue, .dequeue, .get_count ... */
  rte_mempool_register_ops (&ops);

  strncpy (ops.name, "vpp-no-cache", 13);
  /* ... swap in the no-cache enqueue/dequeue/get_count variants ... */
  rte_mempool_register_ops (&ops);

  /* ... then call dpdk_buffer_pool_init () for every VPP buffer pool ... */
  return 0;
}

VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
			      sizeof (struct rte_mbuf));
#endif
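A usage sketch: once the two ops are registered and selected with rte_mempool_set_ops_byname () in dpdk_buffer_pool_init (), ordinary mbuf allocation and free on such a pool end up in the callbacks above, possibly via the per-lcore cache (the pool index 0 below is just an example):

  /* sketch only */
  struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[0];
  struct rte_mbuf *m = rte_pktmbuf_alloc (mp);	/* served by dpdk_ops_vpp_dequeue () */
  if (m)
    rte_pktmbuf_free (m);			/* served by dpdk_ops_vpp_enqueue () */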
#define vlib_buffer_from_rte_mbuf(x)
#define CLIB_MARCH_FN_POINTER(fn)
STATIC_ASSERT_OFFSET_OF(vlib_buffer_t, template_end, 64)
#define clib_error(format, args...)
static int dpdk_ops_vpp_alloc(struct rte_mempool *mp)
static_always_inline void dpdk_mbuf_init_from_template(struct rte_mbuf **mba, struct rte_mbuf *mt, int count)
static_always_inline vlib_buffer_pool_t * vlib_get_buffer_pool(vlib_main_t *vm, u8 buffer_pool_index)
#define VLIB_BUFFER_PRE_DATA_SIZE
vlib_buffer_main_t * buffer_main
VLIB_BUFFER_SET_EXT_HDR_SIZE(sizeof(struct rte_mempool_objhdr)+sizeof(struct rte_mbuf))
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
vlib_physmem_map_t * vlib_physmem_get_map(vlib_main_t *vm, u32 index)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
u8 buffer_pool_index
Index of the buffer pool this buffer belongs to.
#define vec_reset_length(v)
Reset vector length to zero (NULL-pointer tolerant).
#define clib_memcpy(d, s, n)
#define static_always_inline
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
clib_error_t * dpdk_buffer_pool_init(vlib_main_t *vm, vlib_buffer_pool_t *bp)
static void dpdk_ops_vpp_free(struct rte_mempool *mp)
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static_always_inline void dpdk_ops_vpp_enqueue_one(vlib_buffer_t *bt, void *obj)
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define rte_mbuf_from_vlib_buffer(x)
vlib_buffer_t buffer_template
vlib_buffer_pool_t * buffer_pools
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
#define clib_atomic_sub_fetch(a, b)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
STATIC_ASSERT(VLIB_BUFFER_PRE_DATA_SIZE==RTE_PKTMBUF_HEADROOM,"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM")
#define STATIC_ASSERT_SIZEOF_ELT(d, e, s)
static_always_inline void vlib_buffer_pool_put(vlib_main_t *vm, u8 buffer_pool_index, u32 *buffers, u32 n_buffers)
#define vec_free(V)
Free vector's memory (no header).
#define clib_warning(format, args...)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue_no_cache(struct rte_mempool *cmp, void *const *obj_table, unsigned n)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue(struct rte_mempool *mp, void *const *obj_table, unsigned n)
CLIB_MARCH_FN_REGISTRATION(dpdk_ops_vpp_enqueue)
#define VLIB_BUFFER_HDR_SIZE
static u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
struct rte_mbuf * dpdk_mbuf_template_by_pool_index
static_always_inline void dpdk_ops_vpp_enqueue_no_cache_one(vlib_main_t *vm, struct rte_mempool *old, struct rte_mempool *new, void *obj, vlib_buffer_t *bt)
static void * vlib_buffer_ptr_from_index(uword buffer_mem_start, u32 buffer_index, uword offset)
static void * clib_mem_alloc(uword size)
static uword pointer_to_uword(const void *p)
static vlib_main_t * vlib_get_main(void)
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
clib_error_t * dpdk_buffer_pools_create(vlib_main_t *vm)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
struct rte_mempool ** dpdk_no_cache_mempool_by_buffer_pool_index
VLIB buffer representation.
static unsigned dpdk_ops_vpp_get_count_no_cache(const struct rte_mempool *mp)
static unsigned dpdk_ops_vpp_get_count(const struct rte_mempool *mp)
#define vec_foreach(var, vec)
Vector iterator.
static int dpdk_ops_vpp_dequeue_no_cache(struct rte_mempool *mp, void **obj_table, unsigned n)
struct rte_mempool ** dpdk_mempool_by_buffer_pool_index
#define CLIB_CACHE_LINE_BYTES
#define STATIC_ASSERT_SIZEOF(d, s)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
volatile u8 ref_count
Reference count for this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
#define CLIB_MULTIARCH_FN(fn)