FD.io VPP v19.04-6-g6f05f72
Vector Packet Processing
ipsec.h
1 /*
2  * Copyright (c) 2017 Intel and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef __DPDK_IPSEC_H__
16 #define __DPDK_IPSEC_H__
17 
18 #include <vnet/vnet.h>
19 #include <vppinfra/cache.h>
20 #include <vnet/ipsec/ipsec.h>
21 
22 #undef always_inline
23 #include <rte_config.h>
24 #include <rte_crypto.h>
25 #include <rte_cryptodev.h>
26 
27 #if CLIB_DEBUG > 0
28 #define always_inline static inline
29 #else
30 #define always_inline static inline __attribute__ ((__always_inline__))
31 #endif
32 
33 #define foreach_dpdk_crypto_input_next \
34  _(DROP, "error-drop") \
35  _(IP4_LOOKUP, "ip4-lookup") \
36  _(IP6_LOOKUP, "ip6-lookup") \
37  _(INTERFACE_OUTPUT, "interface-output") \
38  _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \
39  _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")
40 
41 typedef enum
42 {
43 #define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
44  foreach_dpdk_crypto_input_next
45 #undef _
46  DPDK_CRYPTO_INPUT_N_NEXT,
47 } dpdk_crypto_input_next_t;
48 
49 #define MAX_QP_PER_LCORE 16
50 
51 typedef struct
52 {
53  u32 salt;
54  u32 iv[2];
55  u32 cnt;
56 } dpdk_gcm_cnt_blk;
57 
58 typedef struct
59 {
60  u32 next;
62  dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
63  u8 aad[16];
64  u8 icv[32];
65 } dpdk_op_priv_t;
66 
67 typedef struct
68 {
69  u16 *resource_idx;
70  struct rte_crypto_op **ops;
71  u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
72  u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
74 } crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
75 
76 typedef struct
77 {
78  CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */
79  char *name;
80  enum rte_crypto_sym_xform_type type;
81  u32 alg;
82  u8 key_len;
83  u8 iv_len;
84  u8 trunc_size;
85  u8 boundary;
86  u8 disabled;
87  u8 resources;
88 } crypto_alg_t;
89 
90 typedef struct
91 {
92  u16 *free_resources;
93  u16 *used_resources;
94  u8 cipher_support[IPSEC_CRYPTO_N_ALG];
95  u8 auth_support[IPSEC_INTEG_N_ALG];
96  u8 drv_id;
97  u8 numa;
98  u16 id;
99  const char *name;
100  u32 max_qp;
101  u64 features;
102 } crypto_dev_t;
103 
104 typedef struct
105 {
106  const char *name;
107  u16 *devs;
108 } crypto_drv_t;
109 
110 typedef struct
111 {
113  u8 remove;
114  u8 drv_id;
115  u8 dev_id;
116  u8 numa;
117  u16 qp_id;
118  u16 inflights;
119  u16 n_ops;
120  u16 __unused;
121  struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
122  u32 bi[VLIB_FRAME_SIZE];
123 } crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
124 
125 typedef struct
126 {
128  struct rte_cryptodev_sym_session *session;
129 } crypto_session_disposal_t;
130 
131 typedef struct
132 {
133  CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
134  struct rte_cryptodev_sym_session *session;
135  u64 dev_mask;
136 } crypto_session_by_drv_t;
137 
138 typedef struct
139 {
140  /* Required for vec_validate_aligned */
141  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
142  struct rte_mempool *crypto_op;
143  struct rte_mempool *session_h;
144  struct rte_mempool **session_drv;
145  crypto_session_disposal_t *session_disposal;
146  uword *session_by_sa_index;
147  u64 crypto_op_get_failed;
148  u64 session_h_failed;
149  u64 *session_drv_failed;
150  crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
151  clib_spinlock_t lockp;
152 } crypto_data_t;
153 
154 typedef struct
155 {
156  crypto_worker_main_t *workers_main;
157  crypto_dev_t *dev;
158  crypto_resource_t *resource;
159  crypto_alg_t *cipher_algs;
160  crypto_alg_t *auth_algs;
161  crypto_data_t *data;
162  crypto_drv_t *drv;
163  u64 session_timeout; /* nsec */
164  u8 enabled;
165 } dpdk_crypto_main_t;
166 
167 extern dpdk_crypto_main_t dpdk_crypto_main;
168 
169 static const u8 pad_data[] =
170  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 };
171 
172 void crypto_auto_placement (void);
173 
174 clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
175  u32 sa_idx, crypto_resource_t * res,
176  crypto_worker_main_t * cwm, u8 is_outbound);
177 
178 static_always_inline u32
179 crypto_op_len (void)
180 {
181  const u32 align = 4;
182  u32 op_size =
183  sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
184 
185  return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t);
186 }
187 
188 static_always_inline u32
189 crypto_op_get_priv_offset (void)
190 {
191  const u32 align = 16;
192  u32 offset;
193 
194  offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
195  offset = (offset + align - 1) & ~(align - 1);
196 
197  return offset;
198 }
199 
200 static_always_inline dpdk_op_priv_t *
201 crypto_op_get_priv (struct rte_crypto_op * op)
202 {
203  return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
204 }
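crypto_op_len () sizes each crypto-op mempool element as the rte_crypto_op / rte_crypto_sym_op pair plus a trailing dpdk_op_priv_t, and crypto_op_get_priv () returns that private area at a 16-byte-aligned offset behind the op. A minimal sketch of how the private area is typically used around a cryptodev round trip; the example_* helpers are illustrative and not part of the plugin:

/* Stash the next-node index in the op before it is enqueued, and read
 * it back once the completed op is dequeued by dpdk-crypto-input. */
static_always_inline void
example_stash_next (struct rte_crypto_op *op, u32 next_index)
{
  dpdk_op_priv_t *priv = crypto_op_get_priv (op);

  priv->next = next_index;
}

static_always_inline u32
example_unstash_next (struct rte_crypto_op *op)
{
  return crypto_op_get_priv (op)->next;
}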
205 
206 
207 static_always_inline void
208 add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
209  crypto_data_t * data, u32 drv_id, u32 sa_idx)
210 {
211  crypto_session_by_drv_t *sbd;
212  vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
213  CLIB_CACHE_LINE_BYTES);
214  sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
215  sbd->dev_mask |= 1L << drv_id;
216  sbd->session = session;
217 }
218 
219 static_always_inline struct rte_cryptodev_sym_session *
220 get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
221 {
222  crypto_session_by_drv_t *sess_by_sa;
223  if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
224  return NULL;
225  sess_by_sa =
226  vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
227  return (sess_by_sa->dev_mask & (1L << drv_id)) ? sess_by_sa->session : NULL;
228 }
229 
230 static_always_inline clib_error_t *
231 crypto_get_session (struct rte_cryptodev_sym_session ** session,
232  u32 sa_idx,
233  crypto_resource_t * res,
234  crypto_worker_main_t * cwm, u8 is_outbound)
235 {
236  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
237  crypto_data_t *data;
238  struct rte_cryptodev_sym_session *sess;
239 
240  data = vec_elt_at_index (dcm->data, res->numa);
241  sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);
242 
243  if (PREDICT_FALSE (!sess))
244  return create_sym_session (session, sa_idx, res, cwm, is_outbound);
245 
246  session[0] = sess;
247 
248  return NULL;
249 }
250 
251 static_always_inline u16
252 get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
253 {
254  u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg];
255  u16 auth_res = cwm->auth_resource_idx[sa->integ_alg];
256  u8 is_aead;
257 
258  /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */
259 
260  is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
261  (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
262  (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));
263 
264  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
265  return auth_res;
266 
267  if (cipher_res == auth_res)
268  return cipher_res;
269 
270  if (is_aead)
271  return cipher_res;
272 
273  return (u16) ~ 0;
274 }
275 
276 static_always_inline i32
277 crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n)
278 {
279  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
280  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
281  i32 ret;
282 
283  ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);
284 
285  /* *INDENT-OFF* */
286  data->crypto_op_get_failed += ! !ret;
287  /* *INDENT-ON* */
288 
289  return ret;
290 }
291 
292 static_always_inline void
293 crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
294 {
295  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
296  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
297 
298  if (!n)
299  return;
300 
301  rte_mempool_put_bulk (data->crypto_op, (void **) ops, n);
302 }
303 
304 static_always_inline void
305 crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
306  u32 node_index, u32 error, u8 numa)
307 {
308  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
309  crypto_resource_t *res;
310  u16 *res_idx;
311 
312  /* *INDENT-OFF* */
313  vec_foreach (res_idx, cwm->resource_idx)
314  {
315  u16 enq;
316  res = vec_elt_at_index (dcm->resource, res_idx[0]);
317 
318  if (!res->n_ops)
319  continue;
320 
321  enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
322  res->ops, res->n_ops);
323  res->inflights += enq;
324 
325  if (PREDICT_FALSE (enq < res->n_ops))
326  {
327  crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq);
328  vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq);
329 
330  vlib_node_increment_counter (vm, node_index, error,
331  res->n_ops - enq);
332  }
333  res->n_ops = 0;
334  }
335  /* *INDENT-ON* */
336 }
337 
338 static_always_inline void
339 crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
340 {
341  icb->salt = salt;
342  icb->iv[0] = seq;
343  icb->iv[1] = seq_hi;
344 }
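crypto_set_icb () fills the initial counter block with the 4-byte SA salt followed by the 8-byte per-packet IV derived from the ESP sequence numbers, i.e. the salt || IV nonce layout used for AES-GCM ESP (RFC 4106). A minimal sketch, assuming the counter block is the cb field of the op's private area declared above; the helper name is illustrative only:

static_always_inline void
example_fill_gcm_nonce (struct rte_crypto_op *op, u32 salt, u32 seq,
                        u32 seq_hi)
{
  dpdk_op_priv_t *priv = crypto_op_get_priv (op);

  /* salt || seq || seq_hi: SA salt plus the 8-byte IV that is also
   * carried in the ESP header */
  crypto_set_icb (&priv->cb, salt, seq, seq_hi);
}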
345 
346 static_always_inline void
347 crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
348  struct rte_crypto_op *op, void *session,
349  u32 cipher_off, u32 cipher_len,
350  u32 auth_off, u32 auth_len,
351  u8 * aad, u8 * digest, u64 digest_paddr)
352 {
353  struct rte_crypto_sym_op *sym_op;
354 
355  sym_op = (struct rte_crypto_sym_op *) (op + 1);
356 
357  sym_op->m_src = mb0;
358  sym_op->session = session;
359 
360  if (is_aead)
361  {
362  sym_op->aead.data.offset = cipher_off;
363  sym_op->aead.data.length = cipher_len;
364 
365  sym_op->aead.aad.data = aad;
366  sym_op->aead.aad.phys_addr =
367  op->phys_addr + (uintptr_t) aad - (uintptr_t) op;
368 
369  sym_op->aead.digest.data = digest;
370  sym_op->aead.digest.phys_addr = digest_paddr;
371  }
372  else
373  {
374  sym_op->cipher.data.offset = cipher_off;
375  sym_op->cipher.data.length = cipher_len;
376 
377  sym_op->auth.data.offset = auth_off;
378  sym_op->auth.data.length = auth_len;
379 
380  sym_op->auth.digest.data = digest;
381  sym_op->auth.digest.phys_addr = digest_paddr;
382  }
383 }
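crypto_op_setup () programs the symmetric op either through the AEAD union (cipher offset/length plus AAD and digest) or through the separate cipher and auth unions. The AAD physical address is derived from op->phys_addr, so the AAD buffer must live inside the op element itself; the aad[16] field of dpdk_op_priv_t serves that purpose. A hedged sketch of an AES-GCM call as an encrypt node might issue it; the offsets and parameter names are illustrative, not taken from the plugin:

static_always_inline void
example_setup_gcm_op (struct rte_mbuf *mb, struct rte_crypto_op *op,
                      struct rte_cryptodev_sym_session *session,
                      u32 payload_off, u32 payload_len,
                      u8 *digest, u64 digest_paddr)
{
  dpdk_op_priv_t *priv = crypto_op_get_priv (op);

  /* auth_off/auth_len are ignored on the AEAD path, so pass 0;
   * digest_paddr would normally point at the ICV location in the mbuf */
  crypto_op_setup (1 /* is_aead */ , mb, op, session,
                   payload_off, payload_len, 0, 0,
                   priv->aad, digest, digest_paddr);
}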
384 
385 #endif /* __DPDK_IPSEC_H__ */
386 
387 /*
388  * fd.io coding-style-patch-verification: ON
389  *
390  * Local Variables:
391  * eval: (c-set-style "gnu")
392  * End:
393  */