/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017,2020-2021 NXP
*
*/
/* L4 Type field: TCP */
#define DPAA_L4_PARSE_RESULT_TCP 0x20
-#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
/**< Maximum number of frames to be dequeued in a single rx call */
* 0x8000 - Ethernet type
* ShimR & Logical Port ID 0x0000
*/
-#define DPAA_PARSE_MASK 0x00E044ED00800000
+#define DPAA_PARSE_MASK 0x00F044EF00800000
#define DPAA_PARSE_VLAN_MASK 0x0000000000700000
/* Parsed values (Little Endian) */
(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP \
(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+
+/* Checksum Errors */
+#define DPAA_PKT_IP_CSUM_ERR 0x0000400200000000
+#define DPAA_PKT_L4_CSUM_ERR 0x0010000000000000
+#define DPAA_PKT_TYPE_IPV4_CSUM_ERR \
+ (DPAA_PKT_IP_CSUM_ERR | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_CSUM_ERR \
+ (DPAA_PKT_IP_CSUM_ERR | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR \
+ (DPAA_PKT_L4_CSUM_ERR | DPAA_PKT_TYPE_IPV4_TCP)
+#define DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR \
+ (DPAA_PKT_L4_CSUM_ERR | DPAA_PKT_TYPE_IPV6_TCP)
+#define DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR \
+ (DPAA_PKT_L4_CSUM_ERR | DPAA_PKT_TYPE_IPV4_UDP)
+#define DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR \
+ (DPAA_PKT_L4_CSUM_ERR | DPAA_PKT_TYPE_IPV6_UDP)
+
#define DPAA_PKT_L3_LEN_SHIFT 7
/**
uint16_t vlan:1;
uint16_t ethernet:1;
#endif
- } __attribute__((__packed__));
- } __attribute__((__packed__));
+ } __rte_packed;
+ } __rte_packed;
union {
uint16_t l3r; /**< Layer 3 result */
struct {
uint16_t first_ipv6:1;
uint16_t first_ipv4:1;
#endif
- } __attribute__((__packed__));
- } __attribute__((__packed__));
+ } __rte_packed;
+ } __rte_packed;
union {
uint8_t l4r; /**< Layer 4 result */
struct{
uint8_t l4_info_err:1;
uint8_t l4_type:3;
#endif
- } __attribute__((__packed__));
- } __attribute__((__packed__));
+ } __rte_packed;
+ } __rte_packed;
uint8_t cplan; /**< Classification plan id */
uint16_t nxthdr; /**< Next Header */
uint16_t cksum; /**< Checksum */
uint8_t gre_off; /**< GRE offset */
uint8_t l4_off; /**< Layer 4 offset */
uint8_t nxthdr_off; /**< Parser end point */
-} __attribute__ ((__packed__));
+} __rte_packed;
/* The structure is the Prepended Data to the Frame which is used by FMAN */
struct annotations_t {
uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+uint16_t dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs,
+ uint16_t nb_bufs);
uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
struct rte_mbuf **bufs __rte_unused,
uint16_t nb_bufs __rte_unused);
-struct rte_mbuf *dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid);
+uint16_t dpaa_free_mbuf(const struct qm_fd *fd);
+void dpaa_rx_cb(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
-int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
- struct qm_fd *fd,
- uint32_t bpid);
+void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs);
-enum qman_cb_dqrr_result dpaa_rx_cb(void *event,
- struct qman_portal *qm,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr,
- void **bd);
+void dpaa_rx_cb_no_prefetch(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
#endif