FD.io VPP  v19.04.1-1-ge4a0f9f
Vector Packet Processing
buffer.c
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <unistd.h>
#include <errno.h>

#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vfio.h>

#include <vlib/vlib.h>
#include <dpdk/buffer.h>

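/* Each buffer element is laid out as [struct rte_mbuf][vlib_buffer_t]
   [packet data], so the headroom DPDK expects in front of the packet data
   must match the pre-data area VPP reserves; the assert below enforces
   that invariant at build time. */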
STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;

clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
  struct rte_mempool *mp, *nmp;
  struct rte_pktmbuf_pool_private priv;
  enum rte_iova_mode iova_mode;
  u32 *bi;
  u8 *name = 0;

  u32 elt_size =
    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;

  /* create empty mempools */
  vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
			CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index,
			bp->index, CLIB_CACHE_LINE_BYTES);

  /* normal mempool */
  name = format (name, "vpp pool %u%c", bp->index, 0);
  mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				 elt_size, 512, sizeof (priv),
				 bp->numa_node, 0);
  vec_reset_length (name);

  /* non-cached mempool */
  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
  nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				  elt_size, 0, sizeof (priv),
				  bp->numa_node, 0);
  vec_free (name);

  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;

  mp->pool_id = nmp->pool_id = bp->index;

  rte_mempool_set_ops_byname (mp, "vpp", NULL);
  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);

  /* Call the mempool priv initializer */
  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
    vlib_buffer_get_default_data_size (vm);
  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
  rte_pktmbuf_pool_init (mp, &priv);
  rte_pktmbuf_pool_init (nmp, &priv);

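/* In RTE_IOVA_VA mode DPDK uses virtual addresses as IO addresses
   directly; otherwise physical addresses must be looked up for every
   object and page below. */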
  iova_mode = rte_eal_iova_mode ();

  /* populate mempool object buffer header */
  /* *INDENT-OFF* */
  vec_foreach (bi, bp->buffers)
    {
      struct rte_mempool_objhdr *hdr;
      vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
      hdr->mp = mp;
      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
	pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
      mp->populated_size++;
      nmp->populated_size++;
    }
  /* *INDENT-ON* */
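  /* The loop above strings VPP's preallocated buffers onto the mempool
     element lists by hand, mirroring what rte_mempool_populate_*() would
     do, but reusing VPP's buffer memory instead of letting DPDK allocate
     its own. Both pools share the same elements. */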

  /* call the object initializers */
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);

  /* *INDENT-OFF* */
  vec_foreach (bi, bp->buffers)
    {
      vlib_buffer_t *b;
      b = vlib_buffer_ptr_from_index (buffer_mem_start, *bi, 0);
      vlib_buffer_copy_template (b, &bp->buffer_template);
    }
  /* *INDENT-ON* */
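  /* rte_pktmbuf_init() above resets every mbuf header; re-copying the
     pool's buffer template then restores VPP's view of the same
     elements. */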

  /* map DMA pages if at least one physical device exists */
  if (rte_eth_dev_count_avail ())
    {
      uword i;
      size_t page_sz;
      vlib_physmem_map_t *pm;
      int do_vfio_map = 1;

      pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
      page_sz = 1ULL << pm->log2_page_size;

      for (i = 0; i < pm->n_pages; i++)
	{
	  char *va = ((char *) pm->base) + i * page_sz;
	  uword pa = (iova_mode == RTE_IOVA_VA) ?
	    pointer_to_uword (va) : pm->page_table[i];

	  if (do_vfio_map &&
	      rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
	    do_vfio_map = 0;

	  struct rte_mempool_memhdr *memhdr;
	  memhdr = clib_mem_alloc (sizeof (*memhdr));
	  memhdr->mp = mp;
	  memhdr->addr = va;
	  memhdr->iova = pa;
	  memhdr->len = page_sz;
	  memhdr->free_cb = 0;
	  memhdr->opaque = 0;

	  STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
	  mp->nb_mem_chunks++;
	}
    }
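  /* Pages are registered with VFIO for DMA; once a mapping attempt fails
     (e.g. when no VFIO container is in use), further attempts are
     skipped, but the memory chunks are still attached to the mempool. */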

  return 0;
}

static int
dpdk_ops_vpp_alloc (struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static void
dpdk_ops_vpp_free (struct rte_mempool *mp)
{
  clib_warning ("");
}
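/* Pool memory is owned and preallocated by VPP, so the DPDK alloc/free
   ops have nothing to do; the empty clib_warning() calls merely leave a
   trace if they are ever invoked. */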

#endif

static_always_inline void
dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
{
  /* Only non-replicated packets (b->ref_count == 1) expected */

  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  ASSERT (b->ref_count == 1);
  ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
  vlib_buffer_copy_template (b, bt);
}

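/* dpdk_ops_vpp_enqueue_one() above only restores the buffer template;
   the enqueue op below then translates the mbuf pointers to VPP buffer
   indices and returns them to the owning VPP pool in batches. */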
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
					  void *const *obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  u8 buffer_pool_index = mp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  u32 bufs[batch_size];
  u32 n_left = n;
  void *const *obj = obj_table;

  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n_left >= 4)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[1]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[2]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[3]);
      obj += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      obj += 1;
      n_left -= 1;
    }

  while (n >= batch_size)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
					   batch_size,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
      n -= batch_size;
      obj_table += batch_size;
    }

  if (n)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
					   n, sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue);

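/* Enqueue path for the "vpp-no-cache" pool, used for mbufs that may
   still be referenced elsewhere: the reference count is dropped
   atomically and only the last owner re-homes the mbuf to the cached
   pool and returns the buffer to VPP. */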
static_always_inline void
dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
				   struct rte_mempool *new, void *obj,
				   vlib_buffer_t * bt)
{
  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);

  if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
    {
      u32 bi = vlib_get_buffer_index (vm, b);
      mb->pool = new;
      vlib_buffer_copy_template (b, bt);
      vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
      return;
    }
}

int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
						   void *const *obj_table,
						   unsigned n)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  struct rte_mempool *mp;
  mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id];
  u8 buffer_pool_index = cmp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n >= 4)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt);
      obj_table += 4;
      n -= 4;
    }

  while (n)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      obj_table += 1;
      n -= 1;
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);

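/* Dequeue op: allocate buffers from the VPP pool and hand them to DPDK
   as mbuf pointers; the rte_mbuf sits sizeof (struct rte_mbuf) bytes
   before the vlib_buffer_t, hence the negative offset. */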
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
					  void **obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  u32 bufs[batch_size], total = 0, n_alloc = 0;
  u8 buffer_pool_index = mp->pool_id;
  void **obj = obj_table;

  while (n >= batch_size)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size,
					     buffer_pool_index);
      if (n_alloc != batch_size)
	goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
				    -(i32) sizeof (struct rte_mbuf));
      total += batch_size;
      obj += batch_size;
      n -= batch_size;
    }

  if (n)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index);

      if (n_alloc != n)
	goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, n,
				    -(i32) sizeof (struct rte_mbuf));
    }

  return 0;

alloc_fail:
  /* dpdk doesn't support partial alloc, so we need to return what we
     already got */
  if (n_alloc)
    vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc);
  obj = obj_table;
  while (total)
    {
      vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);

      obj += batch_size;
      total -= batch_size;
    }
  return -ENOENT;
}
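/* DPDK mempool dequeue is all-or-nothing: on a partial allocation
   everything acquired so far is rolled back and -ENOENT is returned.
   total is always a multiple of batch_size here, since the tail
   allocation never increments it. */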

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue);

#ifndef CLIB_MARCH_VARIANT

static int
dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table,
			       unsigned n)
{
  clib_error ("bug");
  return 0;
}
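/* The no-cache pool is only ever freed into, never allocated from, so
   reaching this op indicates a bug (hence the clib_error). */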

static unsigned
dpdk_ops_vpp_get_count (const struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static unsigned
dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
  struct rte_mempool *cmp;
  cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id];
  return dpdk_ops_vpp_get_count (cmp);
}

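/* Register the two ops vectors with DPDK and create a mempool pair for
   every VPP buffer pool. CLIB_MARCH_FN_POINTER selects the most suitable
   multiarch variant of the enqueue/dequeue handlers at runtime. */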
clib_error_t *
dpdk_buffer_pools_create (vlib_main_t * vm)
{
  clib_error_t *err;
  vlib_buffer_pool_t *bp;

  struct rte_mempool_ops ops = { };

  strncpy (ops.name, "vpp", 4);
  ops.alloc = dpdk_ops_vpp_alloc;
  ops.free = dpdk_ops_vpp_free;
  ops.get_count = dpdk_ops_vpp_get_count;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue);
  ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue);
  rte_mempool_register_ops (&ops);

  strncpy (ops.name, "vpp-no-cache", 13);
  ops.get_count = dpdk_ops_vpp_get_count_no_cache;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache);
  ops.dequeue = dpdk_ops_vpp_dequeue_no_cache;
  rte_mempool_register_ops (&ops);

  /* *INDENT-OFF* */
  vec_foreach (bp, vm->buffer_main->buffer_pools)
    if (bp->start && (err = dpdk_buffer_pool_init (vm, bp)))
      return err;
  /* *INDENT-ON* */
  return 0;
}

VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
			      sizeof (struct rte_mbuf));
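/* VLIB_BUFFER_SET_EXT_HDR_SIZE above reserves space in front of every
   vlib_buffer_t for the rte_mempool_objhdr and rte_mbuf that DPDK
   expects to find there. */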

#endif

/** @endcond */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */