/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include "common.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "l2t.h"
#include "clip_tbl.h"
#include "cxgbe_filter.h"
/**
 * Initialize Hash Filters
 */
int init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))
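	/*
	 * For illustration: FW_PARAM_DEV(NTID) expands (by token pasting) to
	 * (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	 *  V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_NTID)),
	 * i.e. the identifier of the device-wide "number of TIDs" firmware
	 * parameter queried just below.
	 */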
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;

	adap->tids.ntids = val[0];
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
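	/*
	 * Note: mult_frac(n, p, d) computes (n * p) / d while avoiding
	 * overflow in the intermediate product. With user_filter_perc fixed
	 * at 100, every hardware filter TID is kept for user filters; a
	 * smaller percentage would reserve the remainder.
	 */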
	adap->params.hash_filter = 1;
	return 0;
}
/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
{
	u32 fconf;

	/*
	 * Check for unconfigured fields being used.
	 */
	fconf = adapter->params.tp.vlan_pri_map;

#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))
	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
		return -EOPNOTSUPP;

#undef S
#undef U
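	/*
	 * For illustration, U(F_PORT, iport) above expands to
	 * (!(fconf & F_PORT) && (fs->val.iport || fs->mask.iport)):
	 * the specification is rejected when it matches on a tuple field
	 * that is not part of the configured filter tuple.
	 */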
	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	return 0;
}
/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
				      struct ch_filter_specification *fs)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int iq;

	/*
	 * If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		iq = 0;
	} else {
		/*
		 * If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->n_rx_qsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}
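/*
 * Worked example for get_filter_steerq() above, with hypothetical numbers:
 * if pi->first_qset = 8 and fs->iq = 2, the per-port queue set index 2
 * resolves to adapter->sge.ethrxq[10] and the absolute ID of that queue
 * set's response queue is returned; an fs->iq beyond n_rx_qsets is passed
 * through unchanged as an absolute qid.
 */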
/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}
/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
			  u16 word, u64 mask, u64 val, int no_reply)
{
	struct rte_mbuf *mbuf;
	struct cpl_set_tcb_field *req;
	struct sge_ctrl_txq *ctrlq;

	ctrlq = &adapter->sge.ctrlq[0];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return;

	mbuf->data_len = sizeof(*req);
	mbuf->pkt_len = mbuf->data_len;

	req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
				      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
				      V_NO_REPLY(no_reply));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);

	t4_mgmt_tx(ctrlq, mbuf);
}
/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
			  unsigned int bit_pos, unsigned int val, int no_reply)
{
	set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
		      (unsigned long long)val << bit_pos, no_reply);
}
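/*
 * For illustration: set_tcb_tflag() with bit_pos = 3 and val = 1 yields
 * mask = 0x8 and val = 0x8, i.e. "set bit 3 of the TCB t_flags word";
 * val = 0 with the same mask clears that bit instead.
 */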
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
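/*
 * Resulting layout (derived from the code above): the ulp_txpkt and
 * ULP_TX_SC_IMM headers are written over the space of the CPL's work
 * request header (hence sc->len excludes sizeof(struct work_request_hdr)),
 * followed by the CPL fields proper and a trailing ULP_TX_SC_NOOP that
 * pads the command out to a 16-byte multiple.
 */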
/**
 * Check if entry already filled.
 */
bool is_filter_set(struct tid_info *t, int fidx, int family)
{
	bool result = FALSE;
	int i, max;

	/* IPv6 requires four slots and IPv4 requires only 1 slot.
	 * Ensure there are enough slots available.
	 */
	max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;

	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i <= max; i++) {
		if (rte_bitmap_get(t->ftid_bmap, i)) {
			result = TRUE;
			break;
		}
	}
	t4_os_unlock(&t->ftid_lock);
	return result;
}
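/*
 * Illustration: for an IPv6 request at fidx = 8, is_filter_set() scans
 * bitmap slots 8..11 and reports the entry as set if any of the four
 * slots is busy; an IPv4 request at the same index inspects only slot 8.
 */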
/**
 * Allocate an available free entry
 */
int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
{
	struct tid_info *t = &adap->tids;
	int pos;
	int size = t->nftids;

	t4_os_lock(&t->ftid_lock);
	if (family == FILTER_TYPE_IPV6)
		pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
	else
		pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
	t4_os_unlock(&t->ftid_lock);

	return pos < size ? pos : -1;
}
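/*
 * Illustration: an IPv6 filter needs a region of four consecutive free
 * slots (e.g. slots 4..7), while an IPv4 filter takes the first free
 * slot found. A return value of -1 means no suitable slot is left.
 */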
/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

	if (tp->port_shift >= 0)
		ntuple |= (u64)f->fs.mask.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;

	if (ntuple != tp->hash_filter_mask)
		return 0;

	return ntuple;
}
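/*
 * Illustration: each tuple field lands at the bit offset the chip was
 * configured with, e.g. with protocol_shift = 8 a protocol of 6 (TCP)
 * contributes 0x600 to the ntuple. Hash filters must cover every field
 * the chip hashes on ("maskfull"); if the assembled tuple does not line
 * up exactly with tp->hash_filter_mask, 0 is returned to reject it.
 */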
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);
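		/*
		 * Sketch of the composite request sized above: one ULPTX
		 * work request carrying a SET_TCB_FIELD (re-point the TID's
		 * RSS_INFO at the firmware event queue), an idata aligner,
		 * then ABORT_REQ and ABORT_RPL to tear the TID down, all
		 * rounded up to a 16-byte multiple.
		 */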
		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate mbuf\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				     V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				     V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				     0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}

	return 0;

out_err:
	return -ENOMEM;
}
/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];
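	/*
	 * Illustration: f->fs.val.lip holds the 128-bit address as four
	 * 32-bit words, so the first eight bytes form local_hi (with lip[1]
	 * in the upper half) and the last eight form local_lo, matching the
	 * hi/lo split the CPL expects; the peer address is packed the same
	 * way from fip[].
	 */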
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *req = NULL;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
		       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
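	/*
	 * Worked example: for 198.51.100.1 (a documentation address),
	 * lip[] holds {198, 51, 100, 1}, so the expression assembles
	 * 198 | 51 << 8 | 100 << 16 | 1 << 24 = 0x016433c6, which is the
	 * address in network byte order once stored to memory on a
	 * little-endian host.
	 */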
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
				V_RSS_QUEUE(f->fs.iq) |
				F_T5_OPT_2_VALID |
				F_RX_CHANNEL |
				V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					     (f->fs.dirsteer << 1)) |
				V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct rte_mbuf *mbuf;
	struct sge_ctrl_txq *ctrlq;
	unsigned int iq;
	int atid, size;
	int ret = 0;

	ret = validate_filter(adapter, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);

	ctrlq = &adapter->sge.ctrlq[pi->port_id];

	f = t4_os_alloc(sizeof(*f));
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	atid = cxgbe_alloc_atid(t, f);
	if (atid < 0) {
		ret = -ENOMEM;
		goto out_err;
	}

	if (f->fs.type == FILTER_TYPE_IPV6) {
		/* IPv6 hash filter */
		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_atid;
		}

		size = sizeof(struct cpl_t6_act_open_req6);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req6(f, mbuf,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
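		/*
		 * Note on the qid_filterid argument (derived from the
		 * encoding above): the low 14 bits carry the atid and the
		 * bits above them carry the firmware event queue's absolute
		 * ID, so the ACT_OPEN_RPL is steered back to fw_evtq tagged
		 * with our atid.
		 */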
	} else {
		/* IPv4 hash filter */
		size = sizeof(struct cpl_t6_act_open_req);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req(f, mbuf,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

free_clip:
	cxgbe_clip_release(f->dev, f->clipt);
free_atid:
	cxgbe_free_atid(t, atid);
out_err:
	t4_os_free(f);
	return ret;
}
/**
 * Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct filter_entry *f)
{
	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	/*
	 * The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for this
	 * operation.
	 */
	memset(f, 0, sizeof(*f));
}
/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;
}
int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t)
			return -ENOMEM;
	}
	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
	memset(fwr, 0, sizeof(*fwr));
	/*
	 * Construct the work request to set the filter.
	 */
	fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
			    V_FW_FILTER_WR_NOREPLY(0) |
			    V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_INSVLAN
				(f->fs.newvlan == VLAN_INSERT ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_RMVLAN
				(f->fs.newvlan == VLAN_REMOVE ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
	fwr->rx_chan_rx_rpl_iq =
		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
			    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
						     ));
	fwr->maci_to_matchtypem =
		cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = cpu_to_be16(f->fs.val.lport);
	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
	fwr->fp = cpu_to_be16(f->fs.val.fport);
	fwr->fpm = cpu_to_be16(f->fs.mask.fport);
	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;
}
/**
 * Set the corresponding entry in the bitmap. 4 slots are
 * marked for IPv6, whereas only 1 slot is marked for IPv4.
 */
static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
{
	t4_os_lock(&t->ftid_lock);
	if (rte_bitmap_get(t->ftid_bmap, fidx)) {
		t4_os_unlock(&t->ftid_lock);
		return -EBUSY;
	}
	if (family == FILTER_TYPE_IPV4) {
		rte_bitmap_set(t->ftid_bmap, fidx);
	} else {
		rte_bitmap_set(t->ftid_bmap, fidx);
		rte_bitmap_set(t->ftid_bmap, fidx + 1);
		rte_bitmap_set(t->ftid_bmap, fidx + 2);
		rte_bitmap_set(t->ftid_bmap, fidx + 3);
	}
	t4_os_unlock(&t->ftid_lock);
	return 0;
}
/**
 * Clear the corresponding entry in the bitmap. 4 slots are
 * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
 */
static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
{
	t4_os_lock(&t->ftid_lock);
	if (family == FILTER_TYPE_IPV4) {
		rte_bitmap_clear(t->ftid_bmap, fidx);
	} else {
		rte_bitmap_clear(t->ftid_bmap, fidx);
		rte_bitmap_clear(t->ftid_bmap, fidx + 1);
		rte_bitmap_clear(t->ftid_bmap, fidx + 2);
		rte_bitmap_clear(t->ftid_bmap, fidx + 3);
	}
	t4_os_unlock(&t->ftid_lock);
}
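/*
 * Illustration: setting an IPv6 filter at fidx = 8 marks bitmap slots
 * 8..11 busy, and cxgbe_clear_ftid() with the same arguments releases
 * those same four slots; an IPv4 filter only ever touches its own slot.
 */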
/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct filter_entry *f;
	unsigned int chip_ver;
	int ret;
	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_del_hash_filter(dev, filter_id, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	ret = is_filter_set(&adapter->tids, filter_id, fs->type);
	if (!ret) {
		dev_warn(adapter, "%s: could not find filter entry: %u\n",
			 __func__, filter_id);
		return -EINVAL;
	}
	/*
	 * Ensure filter id is aligned on the 2 slot boundary for T6,
	 * and 4 slot boundary for cards below T6.
	 */
	if (fs->type) {
		if (chip_ver < CHELSIO_T6)
			filter_id &= ~(0x3);
		else
			filter_id &= ~(0x1);
	}

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgbe_clear_ftid(&adapter->tids,
				 f->tid - adapter->tids.ftid_base,
				 f->fs.type ? FILTER_TYPE_IPV6 :
					      FILTER_TYPE_IPV4);
		return del_filter_wr(dev, filter_id);
	}
	/*
	 * If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}

	return 0;
}
/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int fidx, iq, fid_bit = 0;
	struct filter_entry *f;
	unsigned int chip_ver;
	uint8_t bitoff[16] = {0};
	int ret;
	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	ret = validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * Ensure filter id is aligned on the 4 slot boundary for IPv6
	 * maskfull filters.
	 */

	ret = is_filter_set(&adapter->tids, filter_id, fs->type);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);
	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements but writing a new IPv4 filter into the middle
	 * of an existing IPv6 filter requires clearing the old IPv6
	 * filter.
	 */
	if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
		/*
		 * For T6, if our IPv4 filter isn't being written to a
		 * multiple-of-two filter index and there's an IPv6
		 * filter at the multiple-of-2 base slot, then we need
		 * to delete that IPv6 filter ...
		 * For adapters below T6, an IPv6 filter occupies 4 slots.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;
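		/*
		 * Worked example: on T6, filter_id = 5 gives
		 * fidx = 5 & ~0x1 = 4; if slot 4 currently holds a valid
		 * IPv6 filter, the new IPv4 filter would land in its middle,
		 * so the request is refused below with -EBUSY.
		 */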
		if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid)
				return -EBUSY;
		}
	} else { /* IPv6 */
		unsigned int max_filter_id;

		if (chip_ver < CHELSIO_T6) {
			/*
			 * Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3)
				return -EINVAL;

			max_filter_id = filter_id + 4;
		} else {
			/*
			 * On T6, with CLIP enabled, an IPv6 filter occupies
			 * two slots, so it must be aligned on a multiple of
			 * 2 boundary.
			 */
			if (filter_id & 0x1)
				return -EINVAL;

			max_filter_id = filter_id + 2;
		}
		/*
		 * Check all except the base overlapping IPv4 filter
		 * slots.
		 */
		for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid)
				return -EBUSY;
		}
	}
	/*
	 * Check to make sure that the provided filter index is not
	 * already in use by someone else.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = adapter->tids.ftid_base + filter_id;
	fid_bit = filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
			     fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
	if (ret)
		return ret;
	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, fid_bit,
				 fs->type ? FILTER_TYPE_IPV6 :
					    FILTER_TYPE_IPV4);
		return ret;
	}
	/*
	 * Allocate a clip table entry only if we have a non-zero IPv6 address.
	 */
	if (chip_ver > CHELSIO_T5 && fs->type &&
	    memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt)
			goto free_tid;
	}
	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;
	/*
	 * Attempt to set the filter. If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret) {
		fid_bit = f->tid - adapter->tids.ftid_base;
		goto free_tid;
	}

	return ret;
free_tid:
	cxgbe_clear_ftid(&adapter->tids, fid_bit,
			 fs->type ? FILTER_TYPE_IPV6 :
				    FILTER_TYPE_IPV4);
	clear_filter(f);
	return ret;
}
/**
 * Handle a Hash filter write reply.
 */
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0; /* asynchronous setup completed */
		f->valid = 1;

		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		cxgbe_free_atid(t, ftid);
		t4_os_free(f);
	}

	if (ctx)
		t4_complete(&ctx->completion);
}
/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	struct filter_entry *f = NULL;
	unsigned int tid = GET_TID(rpl);
	int idx, max_fidx = adap->tids.nftids;
	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		/* Check this in normal filter region */
		idx = tid - adap->tids.ftid_base;
		if (idx >= max_fidx)
			return;

		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}
	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = G_COOKIE(rpl->cookie);
		struct filter_ctx *ctx;

		/*
		 * Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;
		if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0; /* asynchronous setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->tid = f->tid;
				ctx->result = 0;
			}
		} else if (ret == FW_FILTER_WR_FLT_DELETED) {
			/*
			 * Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(f);
			if (ctx)
				ctx->result = 0;
		} else {
			/*
			 * Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_warn(adap, "filter %u setup failed with error %u\n",
				 idx, ret);
			clear_filter(f);
			if (ctx)
				ctx->result = -EINVAL;
		}

		if (ctx)
			t4_complete(&ctx->completion);
	}
}
/**
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
			   u64 *c, int hash, bool get_byte)
{
	struct filter_entry *f;
	unsigned int tcb_base, tcbaddr;
	int ret;

	tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;

			if (is_t5(adapter->params.chip)) {
				*c = 0;
				return 0;
			}
			tcbaddr = tcb_base + (fidx * TCB_SIZE);
			goto get_count;
		} else {
			return -ERANGE;
		}
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;

		tcbaddr = tcb_base + f->tid * TCB_SIZE;
	}

	f = &adapter->tids.ftid_tab[fidx];
	if (!f->valid)
		return -EINVAL;

get_count:
	if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
		/*
		 * For T5, the Filter Packet Hit Count is maintained as a
		 * 32-bit Big Endian value in the TCB field {timestamp}.
		 * Instead of the filter hit count showing up at offset 20
		 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
		 * up at offset 24. Whacky.
		 */
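		/*
		 * Address arithmetic used below: the packet count is read
		 * as a 32-bit word at tcbaddr + 6 * sizeof(__be32) =
		 * tcbaddr + 24, and the byte count as a 64-bit value at
		 * tcbaddr + 4 * sizeof(__be32) = tcbaddr + 16.
		 */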
		if (get_byte) {
			unsigned int word_offset = 4;
			__be64 be64_byte_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be64_byte_count),
					   &be64_byte_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = be64_to_cpu(be64_byte_count);
		} else {
			unsigned int word_offset = 6;
			__be32 be32_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be32_count), &be32_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = (u64)be32_to_cpu(be32_count);
		}
	}

	return 0;
}
/**
 * Handle a Hash filter delete reply.
 */
void hash_del_filter_rpl(struct adapter *adap,
			 const struct cpl_abort_rpl_rss *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	f = lookup_tid(t, tid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %u\n",
			 __func__, tid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	f->valid = 0;

	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	cxgbe_remove_tid(t, 0, tid, 0);
	t4_os_free(f);

	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}
}