1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
7 #include "base/common.h"
8 #include "base/t4_tcb.h"
9 #include "base/t4_regs.h"
10 #include "cxgbe_filter.h"
15 * Initialize Hash Filters
17 int cxgbe_init_hash_filter(struct adapter *adap)
19 unsigned int n_user_filters;
20 unsigned int user_filter_perc;
22 u32 params[7], val[7];
24 #define FW_PARAM_DEV(param) \
25 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
26 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
28 #define FW_PARAM_PFVF(param) \
29 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
30 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
31 V_FW_PARAMS_PARAM_Y(0) | \
32 V_FW_PARAMS_PARAM_Z(0))
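/*
 * These helper macros compose the 32-bit firmware parameter identifiers
 * (mnemonic plus X/Y/Z selectors) that t4_query_params() expects, so the
 * driver can ask the firmware how many TIDs/filter IDs it has been given.
 */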
34 params[0] = FW_PARAM_DEV(NTID);
35 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
39 adap->tids.ntids = val[0];
40 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
42 user_filter_perc = 100;
43 n_user_filters = mult_frac(adap->tids.nftids,
47 adap->tids.nftids = n_user_filters;
48 adap->params.hash_filter = 1;
53 * Validate whether the requested filter specification can be set by checking
54 * that the requested features have been enabled.
56 int cxgbe_validate_filter(struct adapter *adapter,
57 struct ch_filter_specification *fs)
62 * Check for unconfigured fields being used.
64 fconf = adapter->params.tp.vlan_pri_map;
67 (fs->val._field || fs->mask._field)
68 #define U(_mask, _field) \
69 (!(fconf & (_mask)) && S(_field))
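/*
 * S() is true when the filter specification actually uses a field; U()
 * flags use of a field that is not part of the current compressed filter
 * tuple configuration (fconf), i.e. a match the hardware cannot perform.
 */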
71 if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
72 U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
79 * If the user is requesting that the filter action loop
80 * matching packets back out one of our ports, make sure that
81 * the egress port is in range.
83 if (fs->action == FILTER_SWITCH &&
84 fs->eport >= adapter->params.nports)
88 * Don't allow various trivially obvious bogus out-of-range
91 if (fs->val.iport >= adapter->params.nports)
94 if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
97 if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
104 * Get the queue to which the traffic must be steered.
106 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
107 struct ch_filter_specification *fs)
109 struct port_info *pi = ethdev2pinfo(dev);
110 struct adapter *adapter = pi->adapter;
114 * If the user has requested steering matching Ingress Packets
115 * to a specific Queue Set, we need to make sure it's in range
116 * for the port and map that into the Absolute Queue ID of the
117 * Queue Set's Response Queue.
123 * If the iq id is greater than or equal to the number of qsets,
124 * then assume it is an absolute qid.
126 if (fs->iq < pi->n_rx_qsets)
127 iq = adapter->sge.ethrxq[pi->first_qset +
136 /* Return an error number if the indicated filter isn't writable ... */
137 static int writable_filter(struct filter_entry *f)
148 * Send a CPL_SET_TCB_FIELD message.
150 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
151 u16 word, u64 mask, u64 val, int no_reply)
153 struct rte_mbuf *mbuf;
154 struct cpl_set_tcb_field *req;
155 struct sge_ctrl_txq *ctrlq;
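/*
 * Build the CPL_SET_TCB_FIELD in a control-queue mbuf: target TCB word
 * 'word' of the given ftid, apply 'mask'/'val', and (unless no_reply is
 * set) direct the completion to the firmware event queue.
 */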
157 ctrlq = &adapter->sge.ctrlq[0];
158 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
161 mbuf->data_len = sizeof(*req);
162 mbuf->pkt_len = mbuf->data_len;
164 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
165 memset(req, 0, sizeof(*req));
166 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
167 req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
168 V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
169 V_NO_REPLY(no_reply));
170 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
171 req->mask = cpu_to_be64(mask);
172 req->val = cpu_to_be64(val);
174 t4_mgmt_tx(ctrlq, mbuf);
178 * Set one of the t_flags bits in the TCB.
180 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
181 unsigned int bit_pos, unsigned int val, int no_reply)
183 set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
184 (unsigned long long)val << bit_pos, no_reply);
188 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
190 static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
191 struct cpl_set_tcb_field *req,
193 u64 mask, u64 val, u8 cookie,
196 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
197 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
199 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
200 V_ULP_TXPKT_DEST(0));
201 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
202 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
203 sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
204 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
205 req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
207 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
208 req->mask = cpu_to_be64(mask);
209 req->val = cpu_to_be64(val);
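/*
 * Trail the sub-request with a ULP_TX_SC_NOOP, which pads the ULP_TX
 * payload out to the expected 16-byte alignment.
 */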
210 sc = (struct ulptx_idata *)(req + 1);
211 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
212 sc->len = cpu_to_be32(0);
216 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
217 * IPv4 requires only 1 slot on all cards.
219 u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
221 if (family == FILTER_TYPE_IPV6) {
222 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
232 * Check whether the requested filter entries are already in use.
234 bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
239 /* Ensure there are enough slots available. */
240 t4_os_lock(&t->ftid_lock);
241 for (i = fidx; i < fidx + nentries; i++) {
242 if (rte_bitmap_get(t->ftid_bmap, i)) {
247 t4_os_unlock(&t->ftid_lock);
252 * Allocate free entries, if available.
254 int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
256 struct tid_info *t = &adap->tids;
258 int size = t->nftids;
260 t4_os_lock(&t->ftid_lock);
262 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
265 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
266 t4_os_unlock(&t->ftid_lock);
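/* A position at or beyond 'size' means no free slot was found. */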
268 return pos < size ? pos : -1;
272 * Construct hash filter ntuple.
274 static u64 hash_filter_ntuple(const struct filter_entry *f)
276 struct adapter *adap = ethdev2adap(f->dev);
277 struct tp_params *tp = &adap->params.tp;
279 u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
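/*
 * Each match field is placed at the bit offset the TP block assigned to
 * it in the compressed filter tuple; a negative shift means the field is
 * not enabled in the current configuration. If no protocol is specified,
 * TCP is assumed.
 */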
281 if (tp->port_shift >= 0 && f->fs.mask.iport)
282 ntuple |= (u64)f->fs.val.iport << tp->port_shift;
284 if (tp->protocol_shift >= 0) {
285 if (!f->fs.val.proto)
286 ntuple |= (u64)tcp_proto << tp->protocol_shift;
288 ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
291 if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
292 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
293 if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
294 ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
300 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
302 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
305 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
306 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
308 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
309 V_ULP_TXPKT_DEST(0));
310 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
311 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
312 sc->len = cpu_to_be32(sizeof(*abort_req) -
313 sizeof(struct work_request_hdr));
314 OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
315 abort_req->rsvd0 = cpu_to_be32(0);
316 abort_req->rsvd1 = 0;
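/* Tear down the tid without generating a TCP RST towards the peer. */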
317 abort_req->cmd = CPL_ABORT_NO_RST;
318 sc = (struct ulptx_idata *)(abort_req + 1);
319 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
320 sc->len = cpu_to_be32(0);
324 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
326 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
329 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
330 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
332 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
333 V_ULP_TXPKT_DEST(0));
334 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
335 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
336 sc->len = cpu_to_be32(sizeof(*abort_rpl) -
337 sizeof(struct work_request_hdr));
338 OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
339 abort_rpl->rsvd0 = cpu_to_be32(0);
340 abort_rpl->rsvd1 = 0;
341 abort_rpl->cmd = CPL_ABORT_NO_RST;
342 sc = (struct ulptx_idata *)(abort_rpl + 1);
343 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
344 sc->len = cpu_to_be32(0);
348 * Delete the specified hash filter.
350 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
351 unsigned int filter_id,
352 struct filter_ctx *ctx)
354 struct adapter *adapter = ethdev2adap(dev);
355 struct tid_info *t = &adapter->tids;
356 struct filter_entry *f;
357 struct sge_ctrl_txq *ctrlq;
358 unsigned int port_id = ethdev2pinfo(dev)->port_id;
361 if (filter_id > adapter->tids.ntids)
364 f = lookup_tid(t, filter_id);
366 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
367 __func__, filter_id);
371 ret = writable_filter(f);
377 struct rte_mbuf *mbuf;
378 struct work_request_hdr *wr;
379 struct ulptx_idata *aligner;
380 struct cpl_set_tcb_field *req;
381 struct cpl_abort_req *abort_req;
382 struct cpl_abort_rpl *abort_rpl;
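/*
 * The delete is issued as a single ULP_TX work request carrying three
 * CPLs: a SET_TCB_FIELD that points the tid's RSS_INFO at the firmware
 * event queue, followed by an ABORT_REQ and a pre-built ABORT_RPL for
 * the tid.
 */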
387 wrlen = cxgbe_roundup(sizeof(*wr) +
388 (sizeof(*req) + sizeof(*aligner)) +
389 sizeof(*abort_req) + sizeof(*abort_rpl),
392 ctrlq = &adapter->sge.ctrlq[port_id];
393 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
395 dev_err(adapter, "%s: could not allocate mbuf\n",
400 mbuf->data_len = wrlen;
401 mbuf->pkt_len = mbuf->data_len;
403 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
404 INIT_ULPTX_WR(req, wrlen, 0, 0);
405 wr = (struct work_request_hdr *)req;
407 req = (struct cpl_set_tcb_field *)wr;
408 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
409 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
410 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
412 aligner = (struct ulptx_idata *)(req + 1);
413 abort_req = (struct cpl_abort_req *)(aligner + 1);
414 mk_abort_req_ulp(abort_req, f->tid);
415 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
416 mk_abort_rpl_ulp(abort_rpl, f->tid);
417 t4_mgmt_tx(ctrlq, mbuf);
426 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
428 static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
429 unsigned int qid_filterid, struct adapter *adap)
431 struct cpl_t6_act_open_req6 *req = NULL;
432 u64 local_lo, local_hi, peer_lo, peer_hi;
433 u32 *lip = (u32 *)f->fs.val.lip;
434 u32 *fip = (u32 *)f->fs.val.fip;
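/*
 * The 128-bit local and peer IPv6 addresses are carried as two 64-bit
 * halves in the CPL; they are assembled below from the byte arrays in
 * fs->val.
 */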
436 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
438 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
443 dev_err(adap, "%s: unsupported chip type!\n", __func__);
447 local_hi = ((u64)lip[1]) << 32 | lip[0];
448 local_lo = ((u64)lip[3]) << 32 | lip[2];
449 peer_hi = ((u64)fip[1]) << 32 | fip[0];
450 peer_lo = ((u64)fip[3]) << 32 | fip[2];
452 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
454 req->local_port = cpu_to_be16(f->fs.val.lport);
455 req->peer_port = cpu_to_be16(f->fs.val.fport);
456 req->local_ip_hi = local_hi;
457 req->local_ip_lo = local_lo;
458 req->peer_ip_hi = peer_hi;
459 req->peer_ip_lo = peer_lo;
460 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
461 f->fs.newvlan == VLAN_REWRITE) |
462 V_DELACK(f->fs.hitcnts) |
463 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
464 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
466 V_TX_CHAN(f->fs.eport) |
467 V_ULP_MODE(ULP_MODE_NONE) |
468 F_TCAM_BYPASS | F_NON_OFFLOAD);
469 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
470 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
471 V_RSS_QUEUE(f->fs.iq) |
474 V_SACK_EN(f->fs.swapmac) |
475 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
476 (f->fs.dirsteer << 1)) |
477 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
481 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
483 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
484 unsigned int qid_filterid, struct adapter *adap)
486 struct cpl_t6_act_open_req *req = NULL;
488 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
490 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
495 dev_err(adap, "%s: unsupported chip type!\n", __func__);
499 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
501 req->local_port = cpu_to_be16(f->fs.val.lport);
502 req->peer_port = cpu_to_be16(f->fs.val.fport);
503 req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
504 f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
505 req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
506 f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
507 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
508 f->fs.newvlan == VLAN_REWRITE) |
509 V_DELACK(f->fs.hitcnts) |
510 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
511 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
513 V_TX_CHAN(f->fs.eport) |
514 V_ULP_MODE(ULP_MODE_NONE) |
515 F_TCAM_BYPASS | F_NON_OFFLOAD);
516 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
517 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
518 V_RSS_QUEUE(f->fs.iq) |
521 V_SACK_EN(f->fs.swapmac) |
522 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
523 (f->fs.dirsteer << 1)) |
524 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
528 * Set the specified hash filter.
530 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
531 struct ch_filter_specification *fs,
532 struct filter_ctx *ctx)
534 struct port_info *pi = ethdev2pinfo(dev);
535 struct adapter *adapter = pi->adapter;
536 struct tid_info *t = &adapter->tids;
537 struct filter_entry *f;
538 struct rte_mbuf *mbuf;
539 struct sge_ctrl_txq *ctrlq;
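/*
 * Flow: validate the specification, work out the steering queue,
 * allocate a filter entry plus an ATID (and a CLIP entry for IPv6),
 * then send an ACT_OPEN_REQ/REQ6 to install the hash filter. The
 * asynchronous reply is handled in cxgbe_hash_filter_rpl().
 */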
544 ret = cxgbe_validate_filter(adapter, fs);
548 iq = get_filter_steerq(dev, fs);
550 ctrlq = &adapter->sge.ctrlq[pi->port_id];
552 f = t4_os_alloc(sizeof(*f));
562 * If the new filter requires loopback Destination MAC and/or VLAN
563 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
566 if (f->fs.newvlan == VLAN_INSERT ||
567 f->fs.newvlan == VLAN_REWRITE) {
568 /* allocate L2T entry for new filter */
569 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
570 f->fs.eport, f->fs.dmac);
577 atid = cxgbe_alloc_atid(t, f);
581 if (f->fs.type == FILTER_TYPE_IPV6) {
582 /* IPv6 hash filter */
583 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
587 size = sizeof(struct cpl_t6_act_open_req6);
588 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
594 mbuf->data_len = size;
595 mbuf->pkt_len = mbuf->data_len;
597 mk_act_open_req6(f, mbuf,
598 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
601 /* IPv4 hash filter */
602 size = sizeof(struct cpl_t6_act_open_req);
603 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
609 mbuf->data_len = size;
610 mbuf->pkt_len = mbuf->data_len;
612 mk_act_open_req(f, mbuf,
613 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
618 t4_mgmt_tx(ctrlq, mbuf);
622 cxgbe_clip_release(f->dev, f->clipt);
624 cxgbe_free_atid(t, atid);
632 * Clear a filter and release any of its resources that we own. This also
633 * clears the filter's "pending" status.
635 static void clear_filter(struct filter_entry *f)
638 cxgbe_clip_release(f->dev, f->clipt);
641 * The zeroing of the filter rule below clears the filter valid,
642 * pending, locked flags etc. so it's all we need for
645 memset(f, 0, sizeof(*f));
649 * t4_mk_filtdelwr - create a delete filter WR
650 * @adap: adapter context
651 * @ftid: the filter ID
652 * @wr: the filter work request to populate
653 * @qid: ingress queue to receive the delete notification
655 * Creates a filter work request to delete the supplied filter. If @qid is
656 * negative the delete notification is suppressed.
658 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
659 struct fw_filter2_wr *wr, int qid)
661 memset(wr, 0, sizeof(*wr));
662 if (adap->params.filter2_wr_support)
663 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
665 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
666 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
667 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
668 V_FW_FILTER_WR_NOREPLY(qid < 0));
669 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
671 wr->rx_chan_rx_rpl_iq =
672 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
676 * Create a FW work request to delete the filter at the specified index
678 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
680 struct adapter *adapter = ethdev2adap(dev);
681 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
682 struct rte_mbuf *mbuf;
683 struct fw_filter2_wr *fwr;
684 struct sge_ctrl_txq *ctrlq;
685 unsigned int port_id = ethdev2pinfo(dev)->port_id;
687 ctrlq = &adapter->sge.ctrlq[port_id];
688 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
692 mbuf->data_len = sizeof(*fwr);
693 mbuf->pkt_len = mbuf->data_len;
695 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
696 t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
699 * Mark the filter as "pending" and ship off the Filter Work Request.
700 * When we get the Work Request Reply we'll clear the pending status.
703 t4_mgmt_tx(ctrlq, mbuf);
707 static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
709 struct adapter *adapter = ethdev2adap(dev);
710 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
711 struct rte_mbuf *mbuf;
712 struct fw_filter2_wr *fwr;
713 struct sge_ctrl_txq *ctrlq;
714 unsigned int port_id = ethdev2pinfo(dev)->port_id;
718 * If the new filter requires loopback Destination MAC and/or VLAN
719 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
723 /* allocate L2T entry for new filter */
724 f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
725 f->fs.eport, f->fs.dmac);
730 ctrlq = &adapter->sge.ctrlq[port_id];
731 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
737 mbuf->data_len = sizeof(*fwr);
738 mbuf->pkt_len = mbuf->data_len;
740 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
741 memset(fwr, 0, sizeof(*fwr));
744 * Construct the work request to set the filter.
746 if (adapter->params.filter2_wr_support)
747 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
749 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
750 fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
752 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
753 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
754 V_FW_FILTER_WR_NOREPLY(0) |
755 V_FW_FILTER_WR_IQ(f->fs.iq));
756 fwr->del_filter_to_l2tix =
757 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
758 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
759 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
760 V_FW_FILTER_WR_INSVLAN
761 (f->fs.newvlan == VLAN_INSERT ||
762 f->fs.newvlan == VLAN_REWRITE) |
763 V_FW_FILTER_WR_RMVLAN
764 (f->fs.newvlan == VLAN_REMOVE ||
765 f->fs.newvlan == VLAN_REWRITE) |
766 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
767 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
768 V_FW_FILTER_WR_PRIO(f->fs.prio) |
769 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
770 fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
771 fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
773 fwr->rx_chan_rx_rpl_iq =
774 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
775 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
777 fwr->maci_to_matchtypem =
778 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
779 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
780 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
781 V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
782 fwr->ptcl = f->fs.val.proto;
783 fwr->ptclm = f->fs.mask.proto;
784 rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
785 rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
786 rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
787 rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
788 fwr->lp = cpu_to_be16(f->fs.val.lport);
789 fwr->lpm = cpu_to_be16(f->fs.mask.lport);
790 fwr->fp = cpu_to_be16(f->fs.val.fport);
791 fwr->fpm = cpu_to_be16(f->fs.mask.fport);
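/*
 * FW_FILTER2_WR extends the original FW_FILTER_WR with swapmac and NAT
 * rewrite fields, so these are only filled in when the firmware
 * advertises filter2_wr_support.
 */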
793 if (adapter->params.filter2_wr_support) {
794 fwr->filter_type_swapmac =
795 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
796 fwr->natmode_to_ulp_type =
797 V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
800 V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
801 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
802 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
803 fwr->newlport = cpu_to_be16(f->fs.nat_lport);
804 fwr->newfport = cpu_to_be16(f->fs.nat_fport);
808 * Mark the filter as "pending" and ship off the Filter Work Request.
809 * When we get the Work Request Reply we'll clear the pending status.
812 t4_mgmt_tx(ctrlq, mbuf);
820 * Set the corresponding entries in the bitmap.
822 static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
826 t4_os_lock(&t->ftid_lock);
827 if (rte_bitmap_get(t->ftid_bmap, fidx)) {
828 t4_os_unlock(&t->ftid_lock);
832 for (i = fidx; i < fidx + nentries; i++)
833 rte_bitmap_set(t->ftid_bmap, i);
834 t4_os_unlock(&t->ftid_lock);
839 * Clear the corresponding entries in the bitmap.
841 static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
845 t4_os_lock(&t->ftid_lock);
846 for (i = fidx; i < fidx + nentries; i++)
847 rte_bitmap_clear(t->ftid_bmap, i);
848 t4_os_unlock(&t->ftid_lock);
852 * Check a delete filter request for validity and send it to the hardware.
853 * Return 0 on success, an error number otherwise. We attach any provided
854 * filter operation context to the internal filter specification in order to
855 * facilitate signaling completion of the operation.
857 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
858 struct ch_filter_specification *fs,
859 struct filter_ctx *ctx)
861 struct port_info *pi = dev->data->dev_private;
862 struct adapter *adapter = pi->adapter;
863 struct filter_entry *f;
864 unsigned int chip_ver;
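/*
 * Hash (maskless) filters are torn down through the TCB/abort path in
 * cxgbe_del_hash_filter(); LE-TCAM filters are removed with a filter
 * work request that has the DEL_FILTER flag set.
 */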
868 if (is_hashfilter(adapter) && fs->cap)
869 return cxgbe_del_hash_filter(dev, filter_id, ctx);
871 if (filter_id >= adapter->tids.nftids)
874 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
877 * Ensure the IPv6 filter id is aligned on a 2-slot boundary for T6,
878 * and on a 4-slot boundary for cards below T6.
880 if (fs->type == FILTER_TYPE_IPV6) {
881 if (chip_ver < CHELSIO_T6)
887 nentries = cxgbe_filter_slots(adapter, fs->type);
888 ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
890 dev_warn(adapter, "%s: could not find filter entry: %u\n",
891 __func__, filter_id);
895 f = &adapter->tids.ftid_tab[filter_id];
896 ret = writable_filter(f);
902 cxgbe_clear_ftid(&adapter->tids,
903 f->tid - adapter->tids.ftid_base,
905 return del_filter_wr(dev, filter_id);
909 * If the caller has passed in a Completion Context then we need to
910 * mark it as a successful completion so they don't stall waiting
915 t4_complete(&ctx->completion);
922 * Check a Chelsio Filter Request for validity, convert it into our internal
923 * format and send it to the hardware. Return 0 on success, an error number
924 * otherwise. We attach any provided filter operation context to the internal
925 * filter specification in order to facilitate signaling completion of the
928 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
929 struct ch_filter_specification *fs,
930 struct filter_ctx *ctx)
932 struct port_info *pi = ethdev2pinfo(dev);
933 struct adapter *adapter = pi->adapter;
934 unsigned int fidx, iq;
935 struct filter_entry *f;
936 unsigned int chip_ver;
937 u8 nentries, bitoff[16] = {0};
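/*
 * Flow: hash filter requests go straight to cxgbe_set_hash_filter();
 * otherwise validate the specification, check slot alignment, reserve
 * the ftid bitmap entries, optionally allocate a CLIP entry for IPv6,
 * and ship the filter work request via set_filter_wr().
 */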
940 if (is_hashfilter(adapter) && fs->cap)
941 return cxgbe_set_hash_filter(dev, fs, ctx);
943 if (filter_id >= adapter->tids.nftids)
946 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
948 ret = cxgbe_validate_filter(adapter, fs);
953 * IPv6 filters occupy four slots and must be aligned on four-slot
954 * boundaries for T5. On T6, IPv6 filters occupy two slots and
955 * must be aligned on two-slot boundaries.
957 * IPv4 filters only occupy a single slot and have no alignment
961 if (fs->type == FILTER_TYPE_IPV6) {
962 if (chip_ver < CHELSIO_T6)
968 if (fidx != filter_id)
971 nentries = cxgbe_filter_slots(adapter, fs->type);
972 ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
976 iq = get_filter_steerq(dev, fs);
979 * Check to make sure that the provided filter index is not
980 * already in use by someone else
982 f = &adapter->tids.ftid_tab[filter_id];
986 fidx = adapter->tids.ftid_base + filter_id;
987 ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
992 * Check to make sure the filter requested is writable ...
994 ret = writable_filter(f);
996 /* Clear the bits we have set above */
997 cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
1002 * Allocate a CLIP table entry only if we have a non-zero IPv6 address
1004 if (chip_ver > CHELSIO_T5 && fs->type &&
1005 memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
1006 f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
1012 * Convert the filter specification into our internal format.
1013 * We copy the PF/VF specification into the Outer VLAN field
1014 * here so the rest of the code -- including the interface to
1015 * the firmware -- doesn't have to constantly do these checks.
1022 * Attempt to set the filter. If we don't succeed, we clear
1023 * it and return the failure.
1026 f->tid = fidx; /* Save the actual tid */
1027 ret = set_filter_wr(dev, filter_id);
1034 cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
1040 * Handle a Hash filter write reply.
1042 void cxgbe_hash_filter_rpl(struct adapter *adap,
1043 const struct cpl_act_open_rpl *rpl)
1045 struct tid_info *t = &adap->tids;
1046 struct filter_entry *f;
1047 struct filter_ctx *ctx = NULL;
1048 unsigned int tid = GET_TID(rpl);
1049 unsigned int ftid = G_TID_TID(G_AOPEN_ATID
1050 (be32_to_cpu(rpl->atid_status)));
1051 unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
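/*
 * The reply carries the ATID we allocated at submission time plus, on
 * success, the hardware TID now backing the filter.
 */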
1053 f = lookup_atid(t, ftid);
1055 dev_warn(adap, "%s: could not find filter entry: %d\n",
1064 case CPL_ERR_NONE: {
1066 f->pending = 0; /* asynchronous setup completed */
1069 cxgbe_insert_tid(t, f, f->tid, 0);
1070 cxgbe_free_atid(t, ftid);
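/*
 * Zero the TCB {timestamp} and {t_rtt_ts_recent_age} words, which are
 * reused as the filter hit counters (see cxgbe_get_filter_count()).
 */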
1076 set_tcb_field(adap, tid,
1078 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1079 V_TCB_T_RTT_TS_RECENT_AGE
1080 (M_TCB_T_RTT_TS_RECENT_AGE),
1081 V_TCB_TIMESTAMP(0ULL) |
1082 V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1084 if (f->fs.newvlan == VLAN_INSERT ||
1085 f->fs.newvlan == VLAN_REWRITE)
1086 set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
1090 dev_warn(adap, "%s: filter creation failed with status = %u\n",
1094 if (status == CPL_ERR_TCAM_FULL)
1095 ctx->result = -EAGAIN;
1097 ctx->result = -EINVAL;
1100 cxgbe_free_atid(t, ftid);
1105 t4_complete(&ctx->completion);
1109 * Handle a LE-TCAM filter write/deletion reply.
1111 void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1113 struct filter_entry *f = NULL;
1114 unsigned int tid = GET_TID(rpl);
1115 int idx, max_fidx = adap->tids.nftids;
1117 /* Get the corresponding filter entry for this tid */
1118 if (adap->tids.ftid_tab) {
1119 /* Check this in normal filter region */
1120 idx = tid - adap->tids.ftid_base;
1121 if (idx >= max_fidx)
1124 f = &adap->tids.ftid_tab[idx];
1129 /* We found the filter entry for this tid */
1131 unsigned int ret = G_COOKIE(rpl->cookie);
1132 struct filter_ctx *ctx;
1135 * Pull off any filter operation context attached to the
1141 if (ret == FW_FILTER_WR_FLT_ADDED) {
1142 f->pending = 0; /* asynchronous setup completed */
1148 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1150 * Clear the filter when we get confirmation from the
1151 * hardware that the filter has been deleted.
1158 * Something went wrong. Issue a warning about the
1159 * problem and clear everything out.
1161 dev_warn(adap, "filter %u setup failed with error %u\n",
1165 ctx->result = -EINVAL;
1169 t4_complete(&ctx->completion);
1174 * Retrieve the packet or byte count for the specified filter.
1176 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1177 u64 *c, int hash, bool get_byte)
1179 struct filter_entry *f;
1180 unsigned int tcb_base, tcbaddr;
1183 tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1184 if (is_hashfilter(adapter) && hash) {
1185 if (fidx < adapter->tids.ntids) {
1186 f = adapter->tids.tid_tab[fidx];
1190 if (is_t5(adapter->params.chip)) {
1194 tcbaddr = tcb_base + (fidx * TCB_SIZE);
1200 if (fidx >= adapter->tids.nftids)
1203 f = &adapter->tids.ftid_tab[fidx];
1207 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1210 f = &adapter->tids.ftid_tab[fidx];
1215 if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1217 * For T5, the Filter Packet Hit Count is maintained as a
1218 * 32-bit Big Endian value in the TCB field {timestamp}.
1219 * Similar to the craziness above, instead of the filter hit
1220 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1221 * sizeof(u32)), it actually shows up at offset 24. Whacky.
1224 unsigned int word_offset = 4;
1225 __be64 be64_byte_count;
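/*
 * The 64-bit byte count is read directly from adapter memory (TCB word
 * offset 4) through the memory window protected by win0_lock.
 */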
1227 t4_os_lock(&adapter->win0_lock);
1228 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1230 (word_offset * sizeof(__be32)),
1231 sizeof(be64_byte_count),
1234 t4_os_unlock(&adapter->win0_lock);
1237 *c = be64_to_cpu(be64_byte_count);
1239 unsigned int word_offset = 6;
1242 t4_os_lock(&adapter->win0_lock);
1243 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1245 (word_offset * sizeof(__be32)),
1246 sizeof(be32_count), &be32_count,
1248 t4_os_unlock(&adapter->win0_lock);
1251 *c = (u64)be32_to_cpu(be32_count);
1258 * Clear the hit counters (packet and byte counts) for the specified filter.
1260 int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
1261 int hash, bool clear_byte)
1263 u64 tcb_mask = 0, tcb_val = 0;
1264 struct filter_entry *f = NULL;
1267 if (is_hashfilter(adapter) && hash) {
1268 if (fidx >= adapter->tids.ntids)
1271 /* No hitcounts supported for T5 hashfilters */
1272 if (is_t5(adapter->params.chip))
1275 f = adapter->tids.tid_tab[fidx];
1277 if (fidx >= adapter->tids.nftids)
1280 f = &adapter->tids.ftid_tab[fidx];
1283 if (!f || !f->valid)
1286 tcb_word = W_TCB_TIMESTAMP;
1287 tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
1288 tcb_val = V_TCB_TIMESTAMP(0ULL);
1290 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1293 tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
1295 V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
1296 V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
1297 tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
1298 V_TCB_T_RTSEQ_RECENT(0ULL);
1300 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1307 * Handle a Hash filter delete reply.
1309 void cxgbe_hash_del_filter_rpl(struct adapter *adap,
1310 const struct cpl_abort_rpl_rss *rpl)
1312 struct tid_info *t = &adap->tids;
1313 struct filter_entry *f;
1314 struct filter_ctx *ctx = NULL;
1315 unsigned int tid = GET_TID(rpl);
1317 f = lookup_tid(t, tid);
1319 dev_warn(adap, "%s: could not find filter entry: %u\n",
1330 cxgbe_clip_release(f->dev, f->clipt);
1332 cxgbe_remove_tid(t, 0, tid, 0);
1337 t4_complete(&ctx->completion);