ip->num = j;
}
-static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
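+/*
+ * Split a burst of packets into IPv4 and IPv6 groups by IP version,
+ * recording a pointer to each packet's next-proto field; packets of
+ * any other version are freed.
+ */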
+static void
+split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
+ uint32_t i, n4, n6;
+ struct ip *ip;
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
+ n4 = trf->ip4.num;
+ n6 = trf->ip6.num;
- n_ip4 = traffic->ip4.num;
- n_ip6 = traffic->ip6.num;
+ for (i = 0; i < num; i++) {
+
+ m = mb[i];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
- /* SP/ACL Inbound check ipsec and ip4 */
- for (i = 0; i < nb_pkts_in; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip4.pkts[n4] = m;
+ trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
uint8_t *, offsetof(struct ip, ip_p));
+ n4++;
} else if (ip->ip_v == IP6_VERSION) {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
- traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip6.pkts[n6] = m;
+ trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
uint8_t *,
offsetof(struct ip6_hdr, ip6_nxt));
+ n6++;
} else
rte_pktmbuf_free(m);
}
+ trf->ip4.num = n4;
+ trf->ip6.num = n6;
+}
+
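+/*
+ * Inbound path: submit the IPsec group for crypto processing, split
+ * the returned packets by IP version, then run the SP checks.
+ */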
+static inline void
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ uint16_t nb_pkts_in, n_ip4, n_ip6;
+
+ n_ip4 = traffic->ip4.num;
+ n_ip6 = traffic->ip6.num;
+
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
+
inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
n_ip4);
}
static inline void
-drain_buffers(struct lcore_conf *qconf)
+drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
uint32_t portid;
}
}
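+/*
+ * Flush crypto-ops still buffered for the inbound and outbound
+ * queue-pairs of this lcore.
+ */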
+static inline void
+drain_crypto_buffers(struct lcore_conf *qconf)
+{
+ uint32_t i;
+ struct ipsec_ctx *ctx;
+
+ /* drain inbound buffers */
+ ctx = &qconf->inbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+
+ /* drain outbound buffers */
+ ctx = &qconf->outbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+}
+
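+/*
+ * Dequeue packets that completed inbound crypto processing,
+ * split them by IP version, then run SP checks and routing.
+ */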
+static void
+drain_inbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+ if (n == 0)
+ return;
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic into ipv4 and ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+
+ /* process ipv4 packets */
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+ /* process ipv6 packets */
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
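+/*
+ * Dequeue packets that completed outbound crypto processing
+ * and route them by IP version.
+ */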
+static void
+drain_outbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+ if (n == 0)
+ return;
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic into ipv4 and ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+
+ /* process ipv4 packets */
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+ /* process ipv6 packets */
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
diff_tsc = cur_tsc - prev_tsc;
if (unlikely(diff_tsc > drain_tsc)) {
- drain_buffers(qconf);
+ drain_tx_buffers(qconf);
+ drain_crypto_buffers(qconf);
prev_tsc = cur_tsc;
}
- /* Read packet from RX queues */
for (i = 0; i < qconf->nb_rx_queue; ++i) {
+
+ /* Read packets from RX queues */
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
nb_rx = rte_eth_rx_burst(portid, queueid,
if (nb_rx > 0)
process_pkts(qconf, pkts, nb_rx, portid);
+
+ /* dequeue and process completed crypto-ops */
+ if (UNPROTECTED_PORT(portid))
+ drain_inbound_crypto_queues(qconf,
+ &qconf->inbound);
+ else
+ drain_outbound_crypto_queues(qconf,
+ &qconf->outbound);
}
}
}
return 0;
}
+/*
+ * Enqueue a burst of buffered crypto-ops into the PMD queue.
+ * Crypto-ops that fail to enqueue are dropped and their mbufs freed.
+ */
+void
+enqueue_cop_burst(struct cdev_qp *cqp)
+{
+ uint32_t i, len, ret;
+
+ len = cqp->len;
+ ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
+ if (ret < len) {
+ RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ " enqueued %u crypto ops out of %u\n",
+ cqp->id, cqp->qp, ret, len);
+ /* drop packets that we fail to enqueue */
+ for (i = ret; i < len; i++)
+ rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
+ }
+ cqp->in_flight += ret;
+ cqp->len = 0;
+}
+
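+/* buffer one crypto-op; flush the queue once a full burst accumulates */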
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int32_t ret = 0, i;
-
cqp->buf[cqp->len++] = cop;
- if (cqp->len == MAX_PKT_BURST) {
- int enq_size = cqp->len;
- if ((cqp->in_flight + enq_size) > MAX_INFLIGHT)
- enq_size -=
- (int)((cqp->in_flight + enq_size) - MAX_INFLIGHT);
-
- if (enq_size > 0)
- ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
- cqp->buf, enq_size);
- if (ret < cqp->len) {
- RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
- " enqueued %u crypto ops out of %u\n",
- cqp->id, cqp->qp,
- ret, cqp->len);
- for (i = ret; i < cqp->len; i++)
- rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
- }
- cqp->in_flight += ret;
- cqp->len = 0;
- }
+ if (cqp->len == MAX_PKT_BURST)
+ enqueue_cop_burst(cqp);
}
static inline void
}
}
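+/*
+ * Apply the post-process transform to packets completed by inline/HW
+ * offload (ipsec_ctx->ol_pkts) and collect those that succeed.
+ */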
+static inline int32_t
+ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+ int32_t nb_pkts, ret;
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+ struct rte_mbuf *pkt;
+
+ nb_pkts = 0;
+ while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+ pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
+ rte_prefetch0(pkt);
+ priv = get_priv(pkt);
+ sa = priv->sa;
+ ret = xform_func(pkt, sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ pkts[nb_pkts++] = pkt;
+ }
+
+ return nb_pkts;
+}
+
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
struct rte_mbuf *pkts[], uint16_t max_pkts)
if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
- while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
- pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
- rte_prefetch0(pkt);
- priv = get_priv(pkt);
- sa = priv->sa;
- ret = xform_func(pkt, sa, &priv->cop);
- if (unlikely(ret)) {
- rte_pktmbuf_free(pkt);
- continue;
- }
- pkts[nb_pkts++] = pkt;
- }
-
if (cqp->in_flight == 0)
continue;
ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+ return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
+}
+
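+/* dequeue packets that finished inbound crypto processing */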
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len)
+{
return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+ return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
+}
+
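+/* dequeue packets that finished outbound crypto processing */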
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len)
+{
return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}