/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length of execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
		     struct ecore_exe_queue_obj *o,
		     int exe_len,
		     union ecore_qable_obj *owner,
		     exe_q_validate validate,
		     exe_q_remove remove,
		     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
		  exe_len);
}
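
/* Illustrative sketch (not driver code): ecore_init_mac_obj() below wires a
 * MAC object's execution queue through this initializer, e.g. for E2 chips:
 *
 *	ecore_exe_queue_init(sc, &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
 *			     qable_obj, ecore_validate_vlan_mac,
 *			     ecore_remove_vlan_mac, ecore_optimize_vlan_mac,
 *			     ecore_execute_vlan_mac, ecore_exeq_get_mac);
 *
 * so every queued command is validated, possibly optimized away, and finally
 * executed in chunks of at most 'exe_chunk_len' rules.
 */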

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
				      struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem) cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
			       struct ecore_exe_queue_obj *o,
			       struct ecore_exeq_elem *elem, int restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
					    struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem, link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(sc, elem);
	}
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
						 struct ecore_exe_queue_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->lock);

	__ecore_exe_queue_reset_pending(sc, o);

	ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
				struct ecore_exe_queue_obj *o,
				unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc,
				  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem, link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}
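
/* Worked example (illustrative): with exe_chunk_len == 3, a queue holding a
 * MOVE (cmd_len == 2) followed by two ADDs (cmd_len == 1 each) is split into
 * MOVE+ADD in the first chunk (2 + 1 <= 3) and the remaining ADD in a second
 * chunk once the first completes. The spacer element above only guarantees
 * that exe_queue and pending_comp are never both observed empty while an
 * element is in flight between the two lists.
 */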

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
							  bnx2x_softc *sc)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/*
	 * !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of int definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */
	return !!ECORE_TEST_BIT(o->state, o->pstate);
}
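
/* Example of the truncation hazard the double negation avoids (hypothetical
 * values): if ECORE_TEST_BIT() returned a 64-bit word such as 0x100000000,
 * an implicit narrowing to int would yield 0; '!!' collapses any non-zero
 * value to 1 first, so the result survives the conversion.
 */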

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
			    unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d", state);
	/* being over protective to remind bnx2x_intr_legacy() to
	 * process RAMROD
	 */
	rte_atomic32_set(&sc->scan_fp, 1);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		bnx2x_intr_legacy(sc);
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
			rte_atomic32_set(&sc->scan_fp, 0);
			return ECORE_SUCCESS;
		}

		ECORE_WAIT(sc, delay_us);

		if (sc->panic) {
			rte_atomic32_set(&sc->scan_fp, 0);
			return ECORE_IO;
		}
	}

	/* timeout! */
	PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
	rte_atomic32_set(&sc->scan_fp, 0);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}
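
/* Typical use (sketch): a caller sets the pending bit, posts a ramrod and
 * then polls for its completion, e.g.
 *
 *	r->set_pending(r);
 *	rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid, r->rdata_mapping,
 *			   ETH_CONNECTION_TYPE);
 *	if (!rc)
 *		rc = ecore_state_wait(sc, r->state, r->pstate);
 *
 * where the completion path clears the bit via ecore_raw_clear_pending().
 */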

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
		return ECORE_BUSY;
	}

	ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
 * which wasn't able to run due to a taken lock on vlan mac head list.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		PMD_DRV_LOG(ERR, sc,
			    "execution of pending commands failed with rc %d",
			    rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
 * called due to vlan mac head list lock being taken.
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(sc,
			  "vlan_mac_lock - writer release encountered a pending request");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc,
		  "vlan_mac_lock - locked reader - number %d", o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
				      struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
					   struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		PMD_DRV_LOG(ERR, sc,
			    "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
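
/* Usage sketch: readers bracket registry walks with the lock helpers, as
 * ecore_get_n_elements() and ecore_vlan_mac_del_all() below do:
 *
 *	rc = ecore_vlan_mac_h_read_lock(sc, o);
 *	...walk o->head...
 *	ecore_vlan_mac_h_read_unlock(sc, o);
 *
 * while the execution path takes the writer side via
 * __ecore_vlan_mac_h_write_trylock() and pends itself when readers exist.
 */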

/**
 * ecore_get_n_elements - get n elements from the vlan mac registry
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 * @n:		number of elements to get
 * @base:	base address for element placement
 * @stride:	stride between elements (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
				struct ecore_vlan_mac_obj *o, int n,
				uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0, read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "get_n_elements failed to get vlan mac reader lock; Access without lock");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG
			    (sc, "copied element number %d to address %p element was:",
			     counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}
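
/* Buffer layout note (illustrative): for n MACs with size == ETH_ALEN and a
 * non-zero stride, element i lands at base + i * (stride + size), i.e. the
 * caller reserves 'stride' spare bytes between consecutive entries.
 */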

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
	    if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
		return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc
								*sc __rte_unused,
								struct ecore_vlan_mac_obj
								*o, union
								ecore_classification_ramrod_data
								*data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
	if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
	    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
		return pos;

	return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
			    struct ecore_vlan_mac_obj *src_o,
			    struct ecore_vlan_mac_obj *dst_o,
			    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(sc, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(sc, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
				       __rte_unused struct ecore_vlan_mac_obj
				       *src_o, __rte_unused struct ecore_vlan_mac_obj
				       *dst_o, __rte_unused union
				       ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	uint8_t rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
				 int add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
	    NIG_REG_LLH0_FUNC_MEM;

	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8 * index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @sc:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if TRUE the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
					  int add, int opcode,
					  struct eth_classify_cmd_header
					  *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
	    (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	ECORE_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules
 *
 * Currently we always configure one rule and set the echo field to contain a
 * CID and an opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header
					    *hdr, int rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (uint8_t) rule_cnt;
}
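
/* Echo encoding example (hypothetical values): for cid 0x23 and type
 * ECORE_FILTER_MAC_PENDING the header would carry
 *
 *	hdr->echo = ECORE_CPU_TO_LE32((0x23 & ECORE_SWCID_MASK) |
 *				      (ECORE_FILTER_MAC_PENDING <<
 *				       ECORE_SWCID_SHIFT));
 *
 * so the completion handler can recover both the connection id and the
 * pending-command type from the single echo field.
 */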

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 __rte_unused int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
	    (struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
					      vlan_mac.target_obj, TRUE,
					      CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
		    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o:		queue
 * @type:	ECORE_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj
					     *o, int type, int cam_offset, struct mac_configuration_hdr
					     *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (uint8_t) cam_offset;
	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj
					     *o, int add, int opcode,
					     uint8_t *mac,
					     uint16_t vlan_id, struct
					     mac_configuration_entry
					     *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	uint32_t cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
					 __rte_unused,
					 struct ecore_vlan_mac_obj *o,
					 int type, int cam_offset,
					 int add, uint8_t *mac,
					 uint16_t vlan_id, int opcode,
					 struct mac_configuration_cmd
					 *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];

	ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem,
				  __rte_unused int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
	    (struct mac_configuration_cmd *)(raw->rdata);
	/* 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
	    TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @sc:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_ramrod_params *p,
				  struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct
					       ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}
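
/* Restore-iterator sketch (illustrative only): a caller re-adds all
 * previously configured entries roughly as follows (cookie starts out NULL):
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *	} while (rc >= 0 && pos);
 *
 * A real caller also distinguishes ECORE_PENDING from hard errors.
 */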

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
						  struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
	    if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
			      sizeof(*data)) &&
		(pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
		return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:		device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(sc,
			  "ADD command is not allowed considering current registry state.");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending ADD command already");
		return ECORE_EXISTS;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return a ECORE_EXIST.
	 */
	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(sc,
			  "DEL command is not allowed considering current registry state");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending DEL command already");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
					union ecore_qable_obj *qo,
					struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc,
			  "MOVE command is not allowed considering current registry state");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending DEL command on the source queue already");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending ADD command on the destination queue already");
		return ECORE_INVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
				 union ecore_qable_obj *qo,
				 struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @cqe:	completion element
 * @ramrod_flags: if RAMROD_CONT is set the next bulk of pending commands
 *		  will be executed
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}

/**
 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @sc:		device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 */
static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem query, *pos;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;

	ECORE_MEMCPY(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
		break;
	case ECORE_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
				    &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
				PMD_DRV_LOG(ERR, sc,
					    "Failed to return the credit for the optimized ADD command");
				return ECORE_INVAL;
			} else if (!o->get_credit(o)) {	/* VLAN_MAC_DEL */
				PMD_DRV_LOG(ERR, sc,
					    "Failed to recover the credit from the optimized DEL command");
				return ECORE_INVAL;
			}
		}

		ECORE_MSG(sc, "Optimizing %s command",
			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
			  "ADD" : "DEL");

		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
		ecore_exe_queue_free_elem(sc, pos);
		return 1;
	}

	return 0;
}
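
/* Optimization example (illustrative): if a DEL for MAC X is still sitting
 * unexecuted in the queue and an ADD for the same X arrives, the pair cancels
 * out - the queued DEL is removed here, the new ADD is freed by
 * ecore_exe_queue_add(), the CAM credit is balanced, and neither command
 * reaches the firmware.
 */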

/**
 * ecore_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 * @elem:	execution queue element
 * @restore:	restore flag
 * @re:		output registry element
 *
 * prepare a registry element according to the current command request.
 */
static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o,
					    struct ecore_exeq_elem *elem,
					    int restore, struct
					    ecore_vlan_mac_registry_elem
					    **re)
{
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct ecore_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
		if (!reg_elem)
			return ECORE_NOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			ECORE_DBG_BREAK_IF(1);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			return ECORE_INVAL;
		}

		ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			     sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
		    elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else			/* DEL, RESTORE */
		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return ECORE_SUCCESS;
}

/**
 * ecore_execute_vlan_mac - execute vlan mac command
 *
 * @sc:			device handle
 * @qo:			qable object
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
				  union ecore_qable_obj *qo,
				  ecore_list_t *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc, idx = 0;
	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
	int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct ecore_vlan_mac_registry_elem *reg_elem;
	enum ecore_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
					  struct ecore_exeq_elem) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			ECORE_DBG_BREAK_IF(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == ECORE_VLAN_MAC_ADD) ||
			     (cmd == ECORE_VLAN_MAC_MOVE)))
				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
						     &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside ecore_sp_post()).
		 */

		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
				   r->rdata_mapping, ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(sc, o,
						&elem->cmd_data.vlan_mac.u);

			ECORE_DBG_BREAK_IF(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
		}
	}

	if (!drv_only)
		return ECORE_PENDING;
	else
		return ECORE_SUCCESS;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == ECORE_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == ECORE_VLAN_MAC_ADD) ||
		     (cmd == ECORE_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(sc, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
							&cam_obj->head);
				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			}
		}
	}

	return rc;
}

static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
				       ecore_vlan_mac_ramrod_params *p)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = ecore_exe_queue_alloc_elem(sc);
	if (!elem)
		return ECORE_NOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case ECORE_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
		     sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
}

/**
 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @sc:		device handle
 * @p:		command parameters
 *
 */
int ecore_config_vlan_mac(struct bnx2x_softc *sc,
			  struct ecore_vlan_mac_ramrod_params *p)
{
	int rc = ECORE_SUCCESS;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
	struct ecore_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(sc, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		ECORE_MSG(sc,
			  "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(sc, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(sc,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}
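
/* Caller sketch (illustrative, mirrors how the bnx2x PMD drives this API):
 * to program one unicast MAC and block until the ramrod completes:
 *
 *	struct ecore_vlan_mac_ramrod_params p = { 0 };
 *
 *	p.vlan_mac_obj = mac_obj;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	ECORE_MEMCPY(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	rc = ecore_config_vlan_mac(sc, &p);
 */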

/**
 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @vlan_mac_flags:	flags spec to match
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;
	int rc = 0, read_lock;
	struct ecore_vlan_mac_ramrod_params p;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	ECORE_SPIN_LOCK_BH(&exeq->lock);

	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
				       &exeq->exe_queue, link,
				       struct ecore_exeq_elem) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(sc, exeq->owner, exeq_pos);
			if (rc) {
				PMD_DRV_LOG(ERR, sc, "Failed to remove command");
				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
				return rc;
			}
			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
						&exeq->exe_queue);
			ecore_exe_queue_free_elem(sc, exeq_pos);
		}
	}

	ECORE_SPIN_UNLOCK_BH(&exeq->lock);

	/* Prepare a command request */
	ECORE_MEMSET(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = ECORE_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		return read_lock;

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = ecore_config_vlan_mac(sc, &p);
			if (rc < 0) {
				PMD_DRV_LOG(ERR, sc,
					    "Failed to add a new DEL command");
				ecore_vlan_mac_h_read_unlock(sc, o);
				return rc;
			}
		}
	}

	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
	ecore_vlan_mac_h_read_unlock(sc, o);

	p.ramrod_flags = *ramrod_flags;
	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	return ecore_config_vlan_mac(sc, &p);
}

static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
			       uint32_t cid, uint8_t func_id,
			       void *rdata,
			       ecore_dma_addr_t rdata_mapping, int state,
			       unsigned long *pstate, ecore_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = ecore_raw_check_pending;
	raw->clear_pending = ecore_raw_clear_pending;
	raw->set_pending = ecore_raw_set_pending;
	raw->wait_comp = ecore_raw_wait;
}

static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
				       uint8_t cl_id, uint32_t cid,
				       uint8_t func_id, void *rdata,
				       ecore_dma_addr_t rdata_mapping,
				       int state, unsigned long *pstate,
				       ecore_obj_type type,
				       struct ecore_credit_pool_obj
				       *macs_pool, struct ecore_credit_pool_obj
				       *vlans_pool)
{
	ECORE_LIST_INIT(&o->head);
	o->head_reader = 0;
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = ecore_vlan_mac_del_all;
	o->restore = ecore_vlan_mac_restore;
	o->complete = ecore_complete_vlan_mac;
	o->wait = ecore_wait_vlan_mac;

	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}

void ecore_init_mac_obj(struct bnx2x_softc *sc,
			struct ecore_vlan_mac_obj *mac_obj,
			uint8_t cl_id, uint32_t cid, uint8_t func_id,
			void *rdata, ecore_dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, ecore_obj_type type,
			struct ecore_credit_pool_obj *macs_pool)
{
	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;

	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = ecore_get_credit_mac;
	mac_obj->put_credit = ecore_put_credit_mac;
	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;

	if (CHIP_IS_E1x(sc)) {
		mac_obj->set_one_rule = ecore_set_one_mac_e1x;
		mac_obj->check_del = ecore_check_mac_del;
		mac_obj->check_add = ecore_check_mac_add;
		mac_obj->check_move = ecore_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &mac_obj->exe_queue, 1, qable_obj,
				     ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = ecore_set_one_mac_e2;
		mac_obj->check_del = ecore_check_mac_del;
		mac_obj->check_add = ecore_check_mac_add;
		mac_obj->check_move = ecore_check_move;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = ecore_get_n_elements;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_mac);
	}
}

/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
				       tstorm_eth_mac_filter_config
				       *mac_filters, uint16_t pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	uint32_t addr = BAR_TSTRORM_INTMEM +
	    TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
}

static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
				 struct ecore_rx_mode_ramrod_params *p)
{
	/* update the sc MAC filter structure */
	uint32_t mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
	    (struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	uint8_t unmatched_unicast = 0;

	/* In E1x we only take the RX accept flag into account, since TX
	 * switching isn't enabled.
	 */
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
	    mac_filters->ucast_drop_all | mask :
	    mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
	    mac_filters->mcast_drop_all | mask :
	    mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
	    mac_filters->ucast_accept_all | mask :
	    mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
	    mac_filters->mcast_accept_all | mask :
	    mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
	    mac_filters->bcast_accept_all | mask :
	    mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
	    mac_filters->unmatched_unicast | mask :
	    mac_filters->unmatched_unicast & ~mask;

	ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
		  "accp_mcast 0x%x accp_bcast 0x%x",
		  mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
		  mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
		  mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(sc, mac_filters, p->func_id);

	/* The operation is completed */
	ECORE_CLEAR_BIT(p->state, p->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}

/* Setup ramrod data */
static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
					   *hdr, uint8_t rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32(cid);
	hdr->rule_cnt = rule_cnt;
}

static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
					   *cmd, int clear_accept_all)
{
	uint16_t state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
	    ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = ECORE_CPU_TO_LE16(state);
}
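
/* Example (illustrative): promiscuous mode sets ECORE_ACCEPT_UNICAST,
 * ECORE_ACCEPT_ALL_UNICAST, ECORE_ACCEPT_ALL_MULTICAST, ECORE_ACCEPT_BROADCAST
 * and ECORE_ACCEPT_ANY_VLAN, which the code above folds into a state word with
 * both DROP_ALL bits cleared and the UCAST/MCAST/BCAST ACCEPT_ALL bits (plus
 * ANY_VLAN) set.
 */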
2086 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2087 struct ecore_rx_mode_ramrod_params *p)
2089 struct eth_filter_rules_ramrod_data *data = p->rdata;
2091 uint8_t rule_idx = 0;
2093 /* Reset the ramrod data buffer */
2094 ECORE_MEMSET(data, 0, sizeof(*data));
2096 /* Setup ramrod data */
2098 /* Tx (internal switching) */
2099 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2100 data->rules[rule_idx].client_id = p->cl_id;
2101 data->rules[rule_idx].func_id = p->func_id;
2103 data->rules[rule_idx].cmd_general_data =
2104 ETH_FILTER_RULES_CMD_TX_CMD;
2106 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2107 &(data->rules[rule_idx++]),
2112 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2113 data->rules[rule_idx].client_id = p->cl_id;
2114 data->rules[rule_idx].func_id = p->func_id;
2116 data->rules[rule_idx].cmd_general_data =
2117 ETH_FILTER_RULES_CMD_RX_CMD;
2119 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2120 &(data->rules[rule_idx++]),
2124 /* If FCoE Queue configuration has been requested configure the Rx and
2125 * internal switching modes for this queue in separate rules.
2127 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2128 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2130 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2131 /* Tx (internal switching) */
2132 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2133 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2134 data->rules[rule_idx].func_id = p->func_id;
2136 data->rules[rule_idx].cmd_general_data =
2137 ETH_FILTER_RULES_CMD_TX_CMD;
2139 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2141 [rule_idx++]), TRUE);
2145 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2146 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2147 data->rules[rule_idx].func_id = p->func_id;
2149 data->rules[rule_idx].cmd_general_data =
2150 ETH_FILTER_RULES_CMD_RX_CMD;
			ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
						       &(data->rules
							 [rule_idx++]), TRUE);
		}
	}
	/* Set the ramrod header (most importantly - the number of rules to
	 * configure).
	 */
2161 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
	ECORE_MSG
	    (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2165 data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
	/* No need for an explicit memory barrier here as long as we would
2168 * need to ensure the ordering of writing to the SPQ element
2169 * and updating of the SPQ producer which involves a memory
2170 * read and we will have to put a full memory barrier there
2171 * (inside ecore_sp_post()).
2175 rc = ecore_sp_post(sc,
2176 RAMROD_CMD_ID_ETH_FILTER_RULES,
			   p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return ECORE_PENDING;
}
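/* A minimal usage sketch for the function above (hedged: "fp" and the
 * rdata fields are hypothetical caller state, not defined here):
 *
 *	struct ecore_rx_mode_ramrod_params p = { 0 };
 *
 *	p.rx_mode_obj = &sc->rx_mode_obj;
 *	p.cl_id = fp->cl_id;
 *	p.cid = fp->cid;
 *	p.func_id = SC_FUNC(sc);
 *	p.rdata = rdata;
 *	p.rdata_mapping = rdata_mapping;
 *	ECORE_SET_BIT(RAMROD_RX, &p.ramrod_flags);
 *	ECORE_SET_BIT(RAMROD_TX, &p.ramrod_flags);
 *	ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *	rc = ecore_config_rx_mode(sc, &p);
 *
 * ecore_config_rx_mode() returns ECORE_PENDING here until the FILTER_RULES
 * ramrod completion arrives (or ECORE_SUCCESS if RAMROD_COMP_WAIT is set).
 */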
2185 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2186 struct ecore_rx_mode_ramrod_params *p)
2188 return ecore_state_wait(sc, p->state, p->pstate);
static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
				    __rte_unused struct
				    ecore_rx_mode_ramrod_params *p)
{
2196 return ECORE_SUCCESS;
int ecore_config_rx_mode(struct bnx2x_softc *sc,
			 struct ecore_rx_mode_ramrod_params *p)
{
	int rc;

	/* Configure the new classification in the chip */
	if (p->rx_mode_obj->config_rx_mode) {
		rc = p->rx_mode_obj->config_rx_mode(sc, p);
		if (rc < 0)
			return rc;

		/* Wait for a ramrod completion if it was requested */
		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
			rc = p->rx_mode_obj->wait_comp(sc, p);
			if (rc)
				return rc;
		}
	} else {
		ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
		return -1;
	}

	return rc;
}
2224 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2226 if (CHIP_IS_E1x(sc)) {
2227 o->wait_comp = ecore_empty_rx_mode_wait;
		o->config_rx_mode = ecore_set_rx_mode_e1x;
	} else {
		o->wait_comp = ecore_wait_rx_mode_comp_e2;
		o->config_rx_mode = ecore_set_rx_mode_e2;
	}
}
2235 /********************* Multicast verbs: SET, CLEAR ****************************/
static uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
{
	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
}
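/* The approximate-match registry holds 256 bins; the bin index above is
 * simply bits 31..24 of the little-endian CRC32 of the 6-byte MAC.
 * Distinct MACs whose CRCs share the top byte collide into one bin,
 * which is why the multicast DEL/RESTORE flows below operate on bins
 * rather than on exact MAC addresses.
 */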
2241 struct ecore_mcast_mac_elem {
2242 ecore_list_entry_t link;
2243 uint8_t mac[ETH_ALEN];
2244 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2247 struct ecore_pending_mcast_cmd {
2248 ecore_list_entry_t link;
2249 int type; /* ECORE_MCAST_CMD_X */
	union {
		ecore_list_t macs_head;
		uint32_t macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with approximate
			       * match
			       */
	} data;

	int alloc_len; /* total allocation length, passed to ECORE_FREE()
			* when the command is released
			*/
	int done; /* set to TRUE when the command has been handled. Used
		   * in practice only for the 57712, where one pending
		   * command may be handled in several operations; on all
		   * other chips every operation is completed in a single
		   * ramrod, so this field is not needed there.
		   */
};
2264 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2266 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2267 o->raw.wait_comp(sc, &o->raw))
2268 return ECORE_TIMEOUT;
2270 return ECORE_SUCCESS;
2273 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2274 struct ecore_mcast_obj *o,
2275 struct ecore_mcast_ramrod_params *p,
2276 enum ecore_mcast_cmd cmd)
{
	int total_sz;
	struct ecore_pending_mcast_cmd *new_cmd;
2280 struct ecore_mcast_mac_elem *cur_mac = NULL;
2281 struct ecore_mcast_list_elem *pos;
2282 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2283 p->mcast_list_len : 0);
2285 /* If the command is empty ("handle pending commands only"), break */
2286 if (!p->mcast_list_len)
2287 return ECORE_SUCCESS;
2289 total_sz = sizeof(*new_cmd) +
2290 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2292 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
	if (!new_cmd)
		return ECORE_NOMEM;
2298 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2299 cmd, macs_list_len);
2301 ECORE_LIST_INIT(&new_cmd->data.macs_head);
	new_cmd->type = cmd;
	new_cmd->done = FALSE;
	new_cmd->alloc_len = total_sz;

	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
2308 cur_mac = (struct ecore_mcast_mac_elem *)
2309 ((uint8_t *) new_cmd + sizeof(*new_cmd));
		/* Push the MACs of the current command into the pending
		 * command MACs list: FIFO
		 */
2314 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2315 struct ecore_mcast_list_elem) {
2316 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2317 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
					     &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

2324 case ECORE_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

2328 case ECORE_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
2333 ECORE_FREE(sc, new_cmd, total_sz);
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
		return ECORE_INVAL;
	}
2338 /* Push the new pending command to the tail of the pending list: FIFO */
2339 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
	return ECORE_PENDING;
}
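/* Layout note for the command allocated above: a single buffer holds the
 * command header immediately followed by the copied MAC array,
 *
 *	+--------------------------------+----------------------------+
 *	| struct ecore_pending_mcast_cmd | macs_list_len MAC elements |
 *	+--------------------------------+----------------------------+
 *
 * so cur_mac in the ADD path simply points right past the header; the
 * pad[] field of struct ecore_mcast_mac_elem keeps each element
 * naturally aligned.
 */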
2347 * ecore_mcast_get_next_bin - get the next set bin (index)
2350 * @last: index to start looking from (including)
2352 * returns the next found (set) bin or a negative value if none is found.
2354 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2356 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2358 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2359 if (o->registry.aprox_match.vec[i])
2360 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2361 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
				if (BIT_VEC64_TEST_BIT
				    (o->registry.aprox_match.vec, cur_bit)) {
					return cur_bit;
				}
			}
		inner_start = 0;
	}

	/* None found */
	return -1;
}
2375 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2379 * returns the index of the found bin or -1 if none is found
2381 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
{
	int cur_bit = ecore_mcast_get_next_bin(o, 0);

	if (cur_bit >= 0)
		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);

	return cur_bit;
}
2391 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2393 struct ecore_raw_obj *raw = &o->raw;
2394 uint8_t rx_tx_flag = 0;
2396 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2397 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2398 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2400 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2401 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;

	return rx_tx_flag;
}
2407 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2408 struct ecore_mcast_obj *o, int idx,
2409 union ecore_mcast_config_data *cfg_data,
2410 enum ecore_mcast_cmd cmd)
2412 struct ecore_raw_obj *r = &o->raw;
2413 struct eth_multicast_rules_ramrod_data *data =
2414 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2415 uint8_t func_id = r->func_id;
	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
	int bin;
2419 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2420 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2422 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case ECORE_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (ecore_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See ecore_mcast_validate_e2() for an explanation of when
		 * this may happen.
		 */
		bin = ecore_mcast_clear_first_bin(o);
		break;

	case ECORE_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
		return;
	}
2450 ECORE_MSG(sc, "%s bin %d",
2451 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2452 "Setting" : "Clearing"), bin);
2454 data->rules[idx].bin_id = (uint8_t) bin;
2455 data->rules[idx].func_id = func_id;
2456 data->rules[idx].engine_id = o->engine_id;
2460 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2462 * @sc: device handle
2464 * @start_bin: index in the registry to start from (including)
2465 * @rdata_idx: index in the ramrod data to start from
2467 * returns last handled bin index or -1 if all bins have been handled
2469 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2470 struct ecore_mcast_obj *o,
2471 int start_bin, int *rdata_idx)
2473 int cur_bin, cnt = *rdata_idx;
2474 union ecore_mcast_config_data cfg_data = { NULL };
2476 /* go through the registry and configure the bins from it */
2477 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2478 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2480 cfg_data.bin = (uint8_t) cur_bin;
2481 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2485 ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}
2499 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2500 struct ecore_mcast_obj *o,
2501 struct ecore_pending_mcast_cmd
2502 *cmd_pos, int *line_idx)
2504 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2505 int cnt = *line_idx;
2506 union ecore_mcast_config_data cfg_data = { NULL };
2508 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2509 &cmd_pos->data.macs_head, link,
2510 struct ecore_mcast_mac_elem) {
2512 cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		ECORE_MSG
		    (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
		     pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
		     pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2522 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2523 &cmd_pos->data.macs_head);
		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;
2534 /* if no more MACs to configure - we are done */
2535 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2536 cmd_pos->done = TRUE;
2539 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2540 struct ecore_mcast_obj *o,
2541 struct ecore_pending_mcast_cmd
2542 *cmd_pos, int *line_idx)
2544 int cnt = *line_idx;
2546 while (cmd_pos->data.macs_num) {
		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;
		ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d",
			  cmd_pos->data.macs_num, cnt);
		/* Break if we reached the maximum
		 * number of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;
2565 /* If we cleared all bins - we are done */
2566 if (!cmd_pos->data.macs_num)
2567 cmd_pos->done = TRUE;
2570 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2571 struct ecore_mcast_obj *o, struct
2572 ecore_pending_mcast_cmd
2573 *cmd_pos, int *line_idx)
	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
						line_idx);
2578 if (cmd_pos->data.next_bin < 0)
2579 /* If o->set_restore returned -1 we are done */
2580 cmd_pos->done = TRUE;
	else
		/* Start from the next bin next time */
		cmd_pos->data.next_bin++;
}
2586 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2587 ecore_mcast_ramrod_params
	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct ecore_mcast_obj *o = p->mcast_obj;
2594 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2595 &o->pending_cmds_head, link,
2596 struct ecore_pending_mcast_cmd) {
2597 switch (cmd_pos->type) {
2598 case ECORE_MCAST_CMD_ADD:
			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
			break;

2602 case ECORE_MCAST_CMD_DEL:
			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
			break;

2606 case ECORE_MCAST_CMD_RESTORE:
			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
							   &cnt);
			break;

		default:
2612 PMD_DRV_LOG(ERR, sc,
2613 "Unknown command: %d", cmd_pos->type);
2617 /* If the command has been completed - remove it from the list
2618 * and free the memory
2620 if (cmd_pos->done) {
2621 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2622 &o->pending_cmds_head);
			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2634 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2635 struct ecore_mcast_obj *o,
2636 struct ecore_mcast_ramrod_params *p,
2639 struct ecore_mcast_list_elem *mlist_pos;
2640 union ecore_mcast_config_data cfg_data = { NULL };
2641 int cnt = *line_idx;
2643 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2644 struct ecore_mcast_list_elem) {
2645 cfg_data.mac = mlist_pos->mac;
		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);

		cnt++;

		ECORE_MSG
		    (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
		     mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
		     mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
	}

	*line_idx = cnt;
}
2659 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2660 struct ecore_mcast_obj *o,
2661 struct ecore_mcast_ramrod_params *p,
2664 int cnt = *line_idx, i;
2666 for (i = 0; i < p->mcast_list_len; i++) {
		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);

		cnt++;

		ECORE_MSG(sc,
			  "Deleting MAC. %d left", p->mcast_list_len - i - 1);
	}

	*line_idx = cnt;
}
 * ecore_mcast_handle_current_cmd - send the command if there is room
2681 * @sc: device handle
2684 * @start_cnt: first line in the ramrod data that may be used
 * This function is called if there is enough place for the current command in
 * the ramrod data.
 * Returns the number of lines filled in the ramrod data in total.
2690 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2691 ecore_mcast_ramrod_params *p,
2692 enum ecore_mcast_cmd cmd,
2695 struct ecore_mcast_obj *o = p->mcast_obj;
2696 int cnt = start_cnt;
	ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);

	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		ecore_mcast_hdl_add(sc, o, p, &cnt);
		break;

	case ECORE_MCAST_CMD_DEL:
		ecore_mcast_hdl_del(sc, o, p, &cnt);
		break;

	case ECORE_MCAST_CMD_RESTORE:
		o->hdl_restore(sc, o, 0, &cnt);
		break;

	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
		return ECORE_INVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}
2724 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2725 struct ecore_mcast_ramrod_params *p,
2726 enum ecore_mcast_cmd cmd)
2728 struct ecore_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
2733 case ECORE_MCAST_CMD_DEL:
2734 o->set_registry_size(o, 0);
2737 /* RESTORE command will restore the entire multicast configuration */
2738 case ECORE_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may turn out to be less, as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin, so the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * ecore_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

2749 case ECORE_MCAST_CMD_ADD:
2750 case ECORE_MCAST_CMD_CONT:
2751 /* Here we assume that all new MACs will fall into new bins.
2752 * However we will correct the real registry size after we
2753 * handle all pending commands.
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
		return ECORE_INVAL;
	}
2763 /* Increase the total number of MACs pending to be configured */
2764 o->total_pending_num += p->mcast_list_len;
2766 return ECORE_SUCCESS;
2769 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
				  struct ecore_mcast_ramrod_params *p,
				  int old_num_bins)
{
2773 struct ecore_mcast_obj *o = p->mcast_obj;
2775 o->set_registry_size(o, old_num_bins);
2776 o->total_pending_num -= p->mcast_list_len;
 * ecore_mcast_set_rdata_hdr_e2 - sets the header values
2782 * @sc: device handle
2784 * @len: number of rules to handle
2786 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
					  *sc, struct ecore_mcast_ramrod_params
					  *p, uint8_t len)
{
2790 struct ecore_raw_obj *r = &p->mcast_obj->raw;
2791 struct eth_multicast_rules_ramrod_data *data =
2792 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2794 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2795 (ECORE_FILTER_MCAST_PENDING <<
2796 ECORE_SWCID_SHIFT));
2797 data->header.rule_cnt = len;
2801 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2803 * @sc: device handle
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
{
	uint64_t elem;
	int i, cnt = 0;

	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
		elem = o->registry.aprox_match.vec[i];
		/* Clearing the lowest set bit costs one iteration per
		 * set bin
		 */
		while (elem) {
			elem &= elem - 1;
			cnt++;
		}
	}

	o->set_registry_size(o, cnt);

	return ECORE_SUCCESS;
}
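/* Example of the bit-clearing trick above: for a vector element of
 * 0xb (binary 1011) the inner loop runs three times,
 * 1011 -> 1010 -> 1000 -> 0, i.e. exactly once per set bin, so the cost
 * scales with the number of set bins rather than with the vector width.
 */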
2825 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2826 struct ecore_mcast_ramrod_params *p,
2827 enum ecore_mcast_cmd cmd)
2829 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2830 struct ecore_mcast_obj *o = p->mcast_obj;
2831 struct eth_multicast_rules_ramrod_data *data =
	    (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;
2835 /* Reset the ramrod data buffer */
2836 ECORE_MEMSET(data, 0, sizeof(*data));
2838 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2840 /* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);
2844 /* The below may be TRUE if there was enough room in ramrod
2845 * data for all pending commands and for the current
2846 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
2850 if (p->mcast_list_len > 0)
2851 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
2856 o->total_pending_num -= cnt;
2859 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2860 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2862 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2864 /* Update a registry size if there are no more pending operations.
2866 * We don't want to change the value of the registry size if there are
2867 * pending operations because we want it to always be equal to the
2868 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2869 * set bins after the last requested operation in order to properly
2870 * evaluate the size of the next DEL/RESTORE operation.
2872 * Note that we update the registry itself during command(s) handling
2873 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2874 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2875 * with a limited amount of update commands (per MAC/bin) and we don't
2876 * know in this scope what the actual state of bins configuration is
2877 * going to be after this ramrod.
2879 if (!o->total_pending_num)
2880 ecore_mcast_refresh_registry_e2(o);
2882 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2883 * RAMROD_PENDING status immediately.
2885 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2886 raw->clear_pending(raw);
		return ECORE_SUCCESS;
	}

	/* No need for an explicit memory barrier here as long as we would
2890 * need to ensure the ordering of writing to the SPQ element
2891 * and updating of the SPQ producer which involves a memory
2892 * read and we will have to put a full memory barrier there
2893 * (inside ecore_sp_post()).
2897 rc = ecore_sp_post(sc,
2898 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
			   raw->cid, raw->rdata_mapping, ETH_CONNECTION_TYPE);
	if (rc)
		return rc;
2904 /* Ramrod completion is pending */
2905 return ECORE_PENDING;
2909 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2910 struct ecore_mcast_ramrod_params *p,
2911 enum ecore_mcast_cmd cmd)
	/* Mark that there is work to do */
2914 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2915 p->mcast_list_len = 1;
2917 return ECORE_SUCCESS;
2920 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2921 __rte_unused struct ecore_mcast_ramrod_params
2922 *p, __rte_unused int old_num_bins)
#define ECORE_57711_SET_MC_FILTER(filter, bit) \
	do { \
		(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
	} while (0)
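/* Example: for bin 77 the macro computes 77 >> 5 = 2 and 77 & 0x1f = 13,
 * i.e. it sets bit 13 of filter[2] - the filter is a flat 256-bit vector
 * split into ECORE_MC_HASH_SIZE 32-bit words, matching the layout of the
 * approximate-match table in the TSTORM internal RAM.
 */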
2932 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2933 struct ecore_mcast_obj *o,
2934 struct ecore_mcast_ramrod_params *p,
				    uint32_t *mc_filter)
{
	struct ecore_mcast_list_elem *mlist_pos;
	int bit;

2940 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2941 struct ecore_mcast_list_elem) {
2942 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
		ECORE_57711_SET_MC_FILTER(mc_filter, bit);

		ECORE_MSG
		    (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
		     mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
		     mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
		     bit);
2951 /* bookkeeping... */
2952 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc __rte_unused,
					struct ecore_mcast_obj *o,
					uint32_t *mc_filter)
{
	int bit;

2963 for (bit = ecore_mcast_get_next_bin(o, 0);
2964 bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2965 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2966 ECORE_MSG(sc, "About to set bin %d", bit);
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM, so we don't
 * really need any tricks to make it work.
 */
2974 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2975 struct ecore_mcast_ramrod_params *p,
				 enum ecore_mcast_cmd cmd)
{
	int i;
	struct ecore_mcast_obj *o = p->mcast_obj;
2980 struct ecore_raw_obj *r = &o->raw;
2982 /* If CLEAR_ONLY has been requested - clear the registry
2983 * and clear a pending bit.
2985 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2986 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2988 /* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
2992 case ECORE_MCAST_CMD_ADD:
			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
			break;

2996 case ECORE_MCAST_CMD_DEL:
2997 ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2999 /* clear the registry */
3000 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
				     sizeof(o->registry.aprox_match.vec));
			break;

3004 case ECORE_MCAST_CMD_RESTORE:
			ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
			break;

		default:
			PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
			return ECORE_INVAL;
		}
3013 /* Set the mcast filter in the internal memory */
3014 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
	} else
		/* clear the registry */
3018 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3019 sizeof(o->registry.aprox_match.vec));
3022 r->clear_pending(r);
3024 return ECORE_SUCCESS;
3027 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3029 return o->registry.aprox_match.num_bins_set;
3032 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3038 int ecore_config_mcast(struct bnx2x_softc *sc,
3039 struct ecore_mcast_ramrod_params *p,
3040 enum ecore_mcast_cmd cmd)
3042 struct ecore_mcast_obj *o = p->mcast_obj;
3043 struct ecore_raw_obj *r = &o->raw;
3044 int rc = 0, old_reg_size;
3046 /* This is needed to recover number of currently configured mcast macs
3047 * in case of failure.
3049 old_reg_size = o->get_registry_size(o);
3051 /* Do some calculations and checks */
	rc = o->validate(sc, p, cmd);
	if (rc)
		return rc;
3056 /* Return if there is no work to do */
3057 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3058 return ECORE_SUCCESS;
3061 (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3062 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3064 /* Enqueue the current command to the pending list if we can't complete
3065 * it in the current iteration
3067 if (r->check_pending(r) ||
3068 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;
3073 /* As long as the current command is in a command list we
3074 * don't need to handle it separately.
		p->mcast_list_len = 0;
	}
	if (!r->check_pending(r)) {
		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(sc, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if it was requested */
		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(sc, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(sc, p, old_reg_size);

	return rc;
}
3105 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3107 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3108 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3109 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3112 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3114 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3115 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3116 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3119 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3124 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3126 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3129 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3130 struct ecore_mcast_obj *mcast_obj,
3131 uint8_t mcast_cl_id, uint32_t mcast_cid,
3132 uint8_t func_id, uint8_t engine_id, void *rdata,
3133 ecore_dma_addr_t rdata_mapping, int state,
3134 unsigned long *pstate, ecore_obj_type type)
3136 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3138 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3139 rdata, rdata_mapping, state, pstate, type);
3141 mcast_obj->engine_id = engine_id;
3143 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3145 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3146 mcast_obj->check_sched = ecore_mcast_check_sched;
3147 mcast_obj->set_sched = ecore_mcast_set_sched;
3148 mcast_obj->clear_sched = ecore_mcast_clear_sched;
3150 if (CHIP_IS_E1H(sc)) {
3151 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3152 mcast_obj->enqueue_cmd = NULL;
3153 mcast_obj->hdl_restore = NULL;
3154 mcast_obj->check_pending = ecore_mcast_check_pending;
		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
3159 mcast_obj->max_cmd_len = -1;
3160 mcast_obj->wait_comp = ecore_mcast_wait;
3161 mcast_obj->set_one_rule = NULL;
3162 mcast_obj->validate = ecore_mcast_validate_e1h;
3163 mcast_obj->revert = ecore_mcast_revert_e1h;
3164 mcast_obj->get_registry_size =
3165 ecore_mcast_get_registry_size_aprox;
3166 mcast_obj->set_registry_size =
		    ecore_mcast_set_registry_size_aprox;
	} else {
3169 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3170 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3171 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3172 mcast_obj->check_pending = ecore_mcast_check_pending;
3173 mcast_obj->max_cmd_len = 16;
3174 mcast_obj->wait_comp = ecore_mcast_wait;
3175 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3176 mcast_obj->validate = ecore_mcast_validate_e2;
3177 mcast_obj->revert = ecore_mcast_revert_e2;
3178 mcast_obj->get_registry_size =
3179 ecore_mcast_get_registry_size_aprox;
3180 mcast_obj->set_registry_size =
		    ecore_mcast_set_registry_size_aprox;
	}
}
3185 /*************************** Credit handling **********************************/
3188 * atomic_add_ifless - add if the result is less than a given value.
3190 * @v: pointer of type ecore_atomic_t
3191 * @a: the amount to add to v...
3192 * @u: ...if (v + a) is less than u.
3194 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
{
	int c, old;

	c = ECORE_ATOMIC_READ(v);
	for (;;) {
		if (ECORE_UNLIKELY(c + a >= u))
			return FALSE;

		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
		if (ECORE_LIKELY(old == c))
			break;
		c = old;
	}

	return TRUE;
}
 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a
 * given value.
 *
 * @v: pointer of type ecore_atomic_t
 * @a: the amount to decrement from v...
 * @u: ...if (v - a) is greater than or equal to u.
 *
 * returns TRUE if (v - a) was greater than or equal to u, and FALSE
 * otherwise.
static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
{
	int c, old;

	c = ECORE_ATOMIC_READ(v);
	for (;;) {
		if (ECORE_UNLIKELY(c - a < u))
			return FALSE;

		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
		if (ECORE_LIKELY(old == c))
			break;
		c = old;
	}

	return TRUE;
}
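/* Both helpers above are classic lock-free compare-and-swap retry loops:
 * read the counter, compute the new value, attempt the CMPXCHG, and retry
 * with the fresh value if another context raced us. For example, with
 * credit = 8, __atomic_dec_ifmoe(&credit, 2, 0) succeeds and leaves 6,
 * while a subsequent __atomic_dec_ifmoe(&credit, 7, 0) fails (6 - 7 < 0)
 * and leaves the counter untouched.
 */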
static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
{
	int rc;

	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);

	return rc;
}
static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
{
	int rc;

	/* Don't allow a refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	return rc;
}
static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
{
	int cur_credit;

	cur_credit = ECORE_ATOMIC_READ(&o->credit);

	return cur_credit;
}
static int ecore_credit_pool_always_TRUE(__rte_unused struct
					 ecore_credit_pool_obj *o,
					 __rte_unused int cnt)
{
	return TRUE;
}
static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
				       int *offset)
{
	int idx, vec, i;

	*offset = -1;

3292 /* Find "internal cam-offset" then add to base for this object... */
3293 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3295 /* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;
3299 /* If we've got here we are going to find a free entry */
3300 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3301 i < BIT_VEC64_ELEM_SZ; idx++, i++)
			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return TRUE;
			}
	}

	return FALSE;
}
static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
				       int offset)
{
	if (offset < o->base_pool_offset)
		return FALSE;

3320 offset -= o->base_pool_offset;
	if (offset >= o->pool_sz)
		return FALSE;

3325 /* Return the entry to the pool */
	BIT_VEC64_SET_BIT(o->pool_mirror, offset);

	return TRUE;
}
3331 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3332 ecore_credit_pool_obj *o,
						   __rte_unused int offset)
{
	return TRUE;
}
3338 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3339 ecore_credit_pool_obj *o,
						   __rte_unused int *offset)
{
	return TRUE;
}
3347 * ecore_init_credit_pool - initialize credit pool internals.
3350 * @base: Base entry in the CAM to use.
3351 * @credit: pool size.
3353 * If base is negative no CAM entries handling will be performed.
3354 * If credit is negative pool operations will always succeed (unlimited pool).
3357 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3358 int base, int credit)
3360 /* Zero the object first */
3361 ECORE_MEMSET(p, 0, sizeof(*p));
3363 /* Set the table to all 1s */
3364 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3366 /* Init a pool as full */
3367 ECORE_ATOMIC_SET(&p->credit, credit);
	/* The total pool size */
3370 p->pool_sz = credit;
3372 p->base_pool_offset = base;
	/* Commit the change */
	ECORE_SMP_MB();

3377 p->check = ecore_credit_pool_check;
	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put = ecore_credit_pool_put;
3382 p->get = ecore_credit_pool_get;
3383 p->put_entry = ecore_credit_pool_put_entry;
		p->get_entry = ecore_credit_pool_get_entry;
	} else {
		p->put = ecore_credit_pool_always_TRUE;
3387 p->get = ecore_credit_pool_always_TRUE;
3388 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
	}
}
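/* The two sentinel arguments give three useful pool flavors, matching the
 * callers below:
 *
 *	ecore_init_credit_pool(p, -1, cam_sz);  credit checked, no CAM
 *	                                        entries handling (57712+)
 *	ecore_init_credit_pool(p, 0, -1);       unlimited pool (E1x VLANs)
 *	ecore_init_credit_pool(p, 0, 0);        fully blocked pool
 */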
3399 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3400 struct ecore_credit_pool_obj *p,
3401 uint8_t func_id, uint8_t func_num)
{

#define ECORE_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1H(sc)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(sc))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
			else
				cam_sz = ECORE_CAM_SIZE_EMUL;
			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			ecore_init_credit_pool(p, 0, 0);
		}
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(sc))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = ECORE_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			ecore_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			ecore_init_credit_pool(p, 0, 0);
		}
	}
}
3446 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3447 struct ecore_credit_pool_obj *p,
3448 uint8_t func_id, uint8_t func_num)
3450 if (CHIP_IS_E1x(sc)) {
		/* There is no VLAN credit in HW on 57711; only
3452 * MAC / MAC-VLAN can be set
		ecore_init_credit_pool(p, 0, -1);
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;

			ecore_init_credit_pool(p, func_id * credit, credit);
		} else {
			/* this should never happen! Block VLAN operations. */
			ecore_init_credit_pool(p, 0, 0);
		}
	}
}
3468 /****************** RSS Configuration ******************/
3471 * ecore_setup_rss - configure RSS
3473 * @sc: device handle
3474 * @p: rss configuration
 * Sends an UPDATE ramrod for that matter.
3478 static int ecore_setup_rss(struct bnx2x_softc *sc,
3479 struct ecore_config_rss_params *p)
3481 struct ecore_rss_config_obj *o = p->rss_obj;
3482 struct ecore_raw_obj *r = &o->raw;
3483 struct eth_rss_update_ramrod_data *data =
3484 (struct eth_rss_update_ramrod_data *)(r->rdata);
	uint8_t rss_mode = 0;
	int rc;

3488 ECORE_MEMSET(data, 0, sizeof(*data));
3490 ECORE_MSG(sc, "Configuring RSS");
3492 /* Set an echo field */
3493 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3494 (r->state << ECORE_SWCID_SHIFT));
3497 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3498 rss_mode = ETH_RSS_MODE_DISABLED;
3499 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3500 rss_mode = ETH_RSS_MODE_REGULAR;
3502 data->rss_mode = rss_mode;
3504 ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3506 /* RSS capabilities */
3507 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3508 data->capabilities |=
3509 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3511 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3512 data->capabilities |=
3513 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3515 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3516 data->capabilities |=
3517 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3519 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3520 data->capabilities |=
3521 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3523 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3524 data->capabilities |=
3525 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3527 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3528 data->capabilities |=
3529 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3531 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3532 data->udp_4tuple_dst_port_mask =
3533 ECORE_CPU_TO_LE16(p->tunnel_mask);
3534 data->udp_4tuple_dst_port_value =
3535 ECORE_CPU_TO_LE16(p->tunnel_value);
3539 data->rss_result_mask = p->rss_result_mask;
3542 data->rss_engine_id = o->engine_id;
3544 ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3546 /* Indirection table */
3547 ECORE_MEMCPY(data->indirection_table, p->ind_table,
3548 T_ETH_INDIRECTION_TABLE_SIZE);
3550 /* Remember the last configuration */
3551 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3554 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3555 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3556 sizeof(data->rss_key));
3557 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	/* No need for an explicit memory barrier here as long as we would
3561 * need to ensure the ordering of writing to the SPQ element
3562 * and updating of the SPQ producer which involves a memory
3563 * read and we will have to put a full memory barrier there
3564 * (inside ecore_sp_post()).
3568 rc = ecore_sp_post(sc,
3569 RAMROD_CMD_ID_ETH_RSS_UPDATE,
			   r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
	if (rc < 0)
		return rc;

	return ECORE_PENDING;
}
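/* A minimal usage sketch for the RSS flow (hedged: the object name and
 * the 0x7f mask, which fits a 128-entry indirection table, are
 * illustrative assumptions):
 *
 *	struct ecore_config_rss_params p = { 0 };
 *
 *	p.rss_obj = &sc->rss_conf_obj;
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &p.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &p.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &p.rss_flags);
 *	p.rss_result_mask = 0x7f;
 *	(fill p.ind_table[] with queue client ids)
 *	rc = ecore_config_rss(sc, &p);
 */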
int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
{
	int rc;
	struct ecore_rss_config_obj *o = p->rss_obj;
3582 struct ecore_raw_obj *r = &o->raw;
3584 /* Do nothing if only driver cleanup was requested */
3585 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3586 return ECORE_SUCCESS;
	r->set_pending(r);

	rc = o->config_rss(sc, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}
3596 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(sc, r);

	return rc;
}
3602 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3603 uint8_t cl_id, uint32_t cid, uint8_t func_id,
3604 uint8_t engine_id, void *rdata,
3605 ecore_dma_addr_t rdata_mapping, int state,
3606 unsigned long *pstate, ecore_obj_type type)
3608 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3609 rdata_mapping, state, pstate, type);
3611 rss_obj->engine_id = engine_id;
3612 rss_obj->config_rss = ecore_setup_rss;
3615 /********************** Queue state object ***********************************/
3618 * ecore_queue_state_change - perform Queue state change transition
3620 * @sc: device handle
3621 * @params: parameters to perform the transition
 * returns 0 in case of a successfully completed transition, a negative error
 * code in case of failure, or a positive (EBUSY) value if there is a
 * completion that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
3629 int ecore_queue_state_change(struct bnx2x_softc *sc,
3630 struct ecore_queue_state_params *params)
3632 struct ecore_queue_sp_obj *o = params->q_obj;
3633 int rc, pending_bit;
3634 unsigned long *pending = &o->pending;
3636 /* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if (rc) {
		PMD_DRV_LOG(ERR, sc,
			    "check transition returned an error. rc %d", rc);
		return ECORE_INVAL;
	}
3644 /* Set "pending" bit */
3645 ECORE_MSG(sc, "pending bit was=%lx", o->pending);
3646 pending_bit = o->set_pending(o, params);
3647 ECORE_MSG(sc, "pending bit now=%lx", o->pending);
3649 /* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->complete_cmd(sc, o, pending_bit);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);
		if (rc) {
			o->next_state = ECORE_Q_STATE_MAX;
			ECORE_CLEAR_BIT(pending_bit, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(sc, o, pending_bit);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	return ECORE_RET_PENDING(pending_bit, pending);
}
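/* A minimal sketch of a synchronous queue transition using the function
 * above (hedged: "fp" is a hypothetical per-queue context):
 *
 *	struct ecore_queue_state_params qp = { 0 };
 *
 *	qp.q_obj = &fp->q_obj;
 *	qp.cmd = ECORE_Q_CMD_HALT;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &qp.ramrod_flags);
 *	rc = ecore_queue_state_change(sc, &qp);
 *
 * With RAMROD_COMP_WAIT set, the call blocks in o->wait_comp() and
 * returns ECORE_SUCCESS only once the HALT completion has arrived;
 * without it, ECORE_RET_PENDING() reports whether the command is still
 * outstanding.
 */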
3674 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3675 struct ecore_queue_state_params *params)
3677 enum ecore_queue_cmd cmd = params->cmd, bit;
	/* ACTIVATE and DEACTIVATE commands are implemented on top of
	 * the UPDATE command.
	 */
	if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
		bit = ECORE_Q_CMD_UPDATE;
	else
		bit = cmd;

	ECORE_SET_BIT(bit, &obj->pending);

	return bit;
}
3691 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3692 struct ecore_queue_sp_obj *o,
3693 enum ecore_queue_cmd cmd)
3695 return ecore_state_wait(sc, cmd, &o->pending);
3699 * ecore_queue_comp_cmd - complete the state change command.
3701 * @sc: device handle
3705 * Checks that the arrived completion is expected.
3707 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3708 struct ecore_queue_sp_obj *o,
3709 enum ecore_queue_cmd cmd)
3711 unsigned long cur_pending = o->pending;
3713 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3714 PMD_DRV_LOG(ERR, sc,
3715 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3716 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
			    cur_pending, o->next_state);
		return ECORE_INVAL;
	}

3721 if (o->next_tx_only >= o->max_cos)
3722 /* >= because tx only must always be smaller than cos since the
3723 * primary connection supports COS 0
3725 PMD_DRV_LOG(ERR, sc,
3726 "illegal value for next tx_only: %d. max cos was %d",
3727 o->next_tx_only, o->max_cos);
3729 ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3730 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3732 if (o->next_tx_only) /* print num tx-only if any exist */
3733 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3734 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3736 o->state = o->next_state;
3737 o->num_tx_only = o->next_tx_only;
3738 o->next_state = ECORE_Q_STATE_MAX;
3740 /* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
3745 ECORE_CLEAR_BIT(cmd, &o->pending);
3746 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3748 return ECORE_SUCCESS;
static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
				       *cmd_params,
				       struct client_init_ramrod_data *data)
{
3755 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3759 /* IPv6 TPA supported for E2 and above only */
	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
					  &params->flags) *
	    CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
}
3765 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3766 struct ecore_queue_sp_obj *o,
3767 struct ecore_general_setup_params
3768 *params, struct client_init_general_data
3769 *gen_data, unsigned long *flags)
3771 gen_data->client_id = o->cl_id;
3773 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3774 gen_data->statistics_counter_id = params->stat_id;
3775 gen_data->statistics_en_flg = 1;
3776 gen_data->statistics_zero_flg =
3777 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3779 gen_data->statistics_counter_id =
3780 DISABLE_STATISTIC_COUNTER_ID_VALUE;
3782 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3783 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3784 gen_data->sp_client_id = params->spcl_id;
3785 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3786 gen_data->func_id = o->func_id;
3788 gen_data->cos = params->cos;
3790 gen_data->traffic_type =
3791 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3792 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3794 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3795 gen_data->activate_flg, gen_data->cos,
3796 gen_data->statistics_en_flg);
3799 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3800 struct client_init_tx_data *tx_data,
3801 unsigned long *flags)
3803 tx_data->enforce_security_flg =
3804 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3805 tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3806 tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3807 tx_data->tx_switching_flg =
3808 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3809 tx_data->anti_spoofing_flg =
3810 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3811 tx_data->force_default_pri_flg =
3812 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3813 tx_data->refuse_outband_vlan_flg =
3814 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
	tx_data->tunnel_non_lso_pcsum_location =
	    ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
	    CSUM_ON_BD;
3819 tx_data->tx_status_block_id = params->fw_sb_id;
3820 tx_data->tx_sb_index_number = params->sb_cq_index;
3821 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3823 tx_data->tx_bd_page_base.lo =
3824 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3825 tx_data->tx_bd_page_base.hi =
3826 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
3832 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3833 struct client_init_rx_data *rx_data)
3835 /* flow control data */
3836 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3837 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3838 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3839 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3840 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3841 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3842 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3845 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3846 struct client_init_rx_data *rx_data,
3847 unsigned long *flags)
3849 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3850 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3851 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3852 CLIENT_INIT_RX_DATA_TPA_MODE;
3853 rx_data->vmqueue_mode_en_flg = 0;
3855 rx_data->extra_data_over_sgl_en_flg =
3856 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3857 rx_data->cache_line_alignment_log_size = params->cache_line_log;
3858 rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3859 rx_data->client_qzone_id = params->cl_qzone_id;
3860 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3862 /* Always start in DROP_ALL mode */
3863 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3864 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3866 /* We don't set drop flags */
3867 rx_data->drop_ip_cs_err_flg = 0;
3868 rx_data->drop_tcp_cs_err_flg = 0;
3869 rx_data->drop_ttl0_flg = 0;
3870 rx_data->drop_udp_cs_err_flg = 0;
3871 rx_data->inner_vlan_removal_enable_flg =
3872 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3873 rx_data->outer_vlan_removal_enable_flg =
3874 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3875 rx_data->status_block_id = params->fw_sb_id;
3876 rx_data->rx_sb_index_number = params->sb_cq_index;
3877 rx_data->max_tpa_queues = params->max_tpa_queues;
3878 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3879 rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3880 rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3881 rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3882 rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
						 flags);

3886 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3887 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3888 rx_data->is_approx_mcast = 1;
3891 rx_data->rss_engine_id = params->rss_engine_id;
3893 /* silent vlan removal */
3894 rx_data->silent_vlan_removal_flg =
3895 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3896 rx_data->silent_vlan_value =
3897 ECORE_CPU_TO_LE16(params->silent_removal_value);
3898 rx_data->silent_vlan_mask =
3899 ECORE_CPU_TO_LE16(params->silent_removal_mask);
3902 /* initialize the general, tx and rx parts of a queue object */
static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc,
					struct ecore_queue_state_params
					*cmd_params,
					struct client_init_ramrod_data *data)
{
3907 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3908 &cmd_params->params.setup.gen_params,
3910 &cmd_params->params.setup.flags);
3912 ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3913 &data->tx, &cmd_params->params.setup.flags);
3915 ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3916 &data->rx, &cmd_params->params.setup.flags);
	ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
				     &data->rx);
}
3922 /* initialize the general and tx parts of a tx-only queue object */
static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc,
				       struct ecore_queue_state_params
				       *cmd_params,
				       struct tx_queue_init_ramrod_data *data)
{
3927 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
3930 &cmd_params->params.tx_only.flags);
3932 ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3933 &data->tx, &cmd_params->params.tx_only.flags);
3935 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3936 cmd_params->q_obj->cids[0],
3937 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3941 * ecore_q_init - init HW/FW queue
3943 * @sc: device handle
3946 * HW/FW initial Queue configuration:
3948 * - CDU context validation
3951 static int ecore_q_init(struct bnx2x_softc *sc,
3952 struct ecore_queue_state_params *params)
3954 struct ecore_queue_sp_obj *o = params->q_obj;
	struct ecore_queue_init_params *init = &params->params.init;
	uint16_t hc_usec;
	uint8_t cos;
3959 /* Tx HC configuration */
3960 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3961 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3962 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3964 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
					       init->tx.sb_cq_index,
					       !ECORE_TEST_BIT
					       (ECORE_Q_FLG_HC_EN,
						&init->tx.flags), hc_usec);
	}
3971 /* Rx HC configuration */
3972 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3973 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3974 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
					       init->rx.sb_cq_index,
					       !ECORE_TEST_BIT
					       (ECORE_Q_FLG_HC_EN,
						&init->rx.flags), hc_usec);
	}
3983 /* Set CDU context validation values */
3984 for (cos = 0; cos < o->max_cos; cos++) {
		ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
			  o->cids[cos], cos);
3987 ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3988 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3991 /* As no ramrod is sent, complete the command immediately */
3992 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3997 return ECORE_SUCCESS;
static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc,
				  struct ecore_queue_state_params *params)
{
4003 struct ecore_queue_sp_obj *o = params->q_obj;
4004 struct client_init_ramrod_data *rdata =
4005 (struct client_init_ramrod_data *)o->rdata;
4006 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4007 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4009 /* Clear the ramrod data */
4010 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4012 /* Fill the ramrod data */
4013 ecore_q_fill_setup_data_cmn(sc, params, rdata);
	/* No need for an explicit memory barrier here as long as we would
4016 * need to ensure the ordering of writing to the SPQ element
4017 * and updating of the SPQ producer which involves a memory
4018 * read and we will have to put a full memory barrier there
4019 * (inside ecore_sp_post()).
	return ecore_sp_post(sc,
			     ramrod,
			     o->cids[ECORE_PRIMARY_CID_INDEX],
4025 data_mapping, ETH_CONNECTION_TYPE);
4028 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4029 struct ecore_queue_state_params *params)
4031 struct ecore_queue_sp_obj *o = params->q_obj;
4032 struct client_init_ramrod_data *rdata =
4033 (struct client_init_ramrod_data *)o->rdata;
4034 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4035 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4037 /* Clear the ramrod data */
4038 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4040 /* Fill the ramrod data */
4041 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4042 ecore_q_fill_setup_data_e2(params, rdata);
	/* No need for an explicit memory barrier here as long as we would
4045 * need to ensure the ordering of writing to the SPQ element
4046 * and updating of the SPQ producer which involves a memory
4047 * read and we will have to put a full memory barrier there
4048 * (inside ecore_sp_post()).
	return ecore_sp_post(sc,
			     ramrod,
			     o->cids[ECORE_PRIMARY_CID_INDEX],
4054 data_mapping, ETH_CONNECTION_TYPE);
static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc,
				      struct ecore_queue_state_params *params)
{
4060 struct ecore_queue_sp_obj *o = params->q_obj;
4061 struct tx_queue_init_ramrod_data *rdata =
4062 (struct tx_queue_init_ramrod_data *)o->rdata;
4063 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4064 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4065 struct ecore_queue_setup_tx_only_params *tx_only_params =
	    &params->params.tx_only;
4067 uint8_t cid_index = tx_only_params->cid_index;
	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
		ECORE_MSG(sc, "sending forward tx-only ramrod");
	}

4073 if (cid_index >= o->max_cos) {
4074 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
			    o->cl_id, cid_index);
		return ECORE_INVAL;
	}

4079 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4080 tx_only_params->gen_params.cos,
4081 tx_only_params->gen_params.spcl_id);
4083 /* Clear the ramrod data */
4084 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4086 /* Fill the ramrod data */
4087 ecore_q_fill_setup_tx_only(sc, params, rdata);
4090 (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4091 o->cids[cid_index], rdata->general.client_id,
4092 rdata->general.sp_client_id, rdata->general.cos);
	/* No need for an explicit memory barrier here as long as we would
4095 * need to ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory
4097 * read and we will have to put a full memory barrier there
4098 * (inside ecore_sp_post()).
4101 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4102 data_mapping, ETH_CONNECTION_TYPE);
4105 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4106 struct ecore_queue_update_params *params,
4107 struct client_update_ramrod_data *data)
4109 /* Client ID of the client to update */
4110 data->client_id = obj->cl_id;
4112 /* Function ID of the client to update */
4113 data->func_id = obj->func_id;
4115 /* Default VLAN value */
4116 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
			   &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
			   &params->update_flags);

	/* Drop packets that have a source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
			   &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
			   &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			   &params->update_flags);
	data->silent_vlan_removal_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
			   &params->update_flags);
	data->silent_vlan_value =
	    ECORE_CPU_TO_LE16(params->silent_removal_value);
	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);

	/* tx switching */
	data->tx_switching_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
	data->tx_switching_change_flg =
	    ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
			   &params->update_flags);
}
4173 static int ecore_q_send_update(struct bnx2x_softc *sc,
4174 struct ecore_queue_state_params *params)
4176 struct ecore_queue_sp_obj *o = params->q_obj;
4177 struct client_update_ramrod_data *rdata =
4178 (struct client_update_ramrod_data *)o->rdata;
4179 ecore_dma_addr_t data_mapping = o->rdata_mapping;
	struct ecore_queue_update_params *update_params =
	    &params->params.update;
4182 uint8_t cid_index = update_params->cid_index;
4184 if (cid_index >= o->max_cos) {
4185 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
			    o->cl_id, cid_index);
		return ECORE_INVAL;
	}
4190 /* Clear the ramrod data */
4191 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4193 /* Fill the ramrod data */
4194 ecore_q_fill_update_data(o, update_params, rdata);
	/* No need for an explicit memory barrier here as long as we would
4197 * need to ensure the ordering of writing to the SPQ element
4198 * and updating of the SPQ producer which involves a memory
4199 * read and we will have to put a full memory barrier there
4200 * (inside ecore_sp_post()).
4203 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4204 o->cids[cid_index], data_mapping,
4205 ETH_CONNECTION_TYPE);
4209 * ecore_q_send_deactivate - send DEACTIVATE command
4211 * @sc: device handle
4214 * implemented using the UPDATE command.
static int ecore_q_send_deactivate(struct bnx2x_softc *sc,
				   struct ecore_queue_state_params *params)
{
	struct ecore_queue_update_params *update = &params->params.update;
4221 ECORE_MEMSET(update, 0, sizeof(*update));
4223 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4225 return ecore_q_send_update(sc, params);
4229 * ecore_q_send_activate - send ACTIVATE command
4231 * @sc: device handle
4234 * implemented using the UPDATE command.
4236 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4237 struct ecore_queue_state_params *params)
	struct ecore_queue_update_params *update = &params->params.update;
4241 ECORE_MEMSET(update, 0, sizeof(*update));
4243 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4244 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4246 return ecore_q_send_update(sc, params);
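
/* Illustrative sketch (not part of the driver): activating a queue
 * synchronously through the generic state-change entry point. The queue
 * object pointer is hypothetical; RAMROD_COMP_WAIT makes the call block
 * until the ramrod completion arrives.
 */
#if 0
static int example_activate_queue(struct bnx2x_softc *sc,
				  struct ecore_queue_sp_obj *q_obj)
{
	struct ecore_queue_state_params params = { 0 };

	params.q_obj = q_obj;
	params.cmd = ECORE_Q_CMD_ACTIVATE;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return ecore_queue_state_change(sc, &params);
}
#endif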

static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
				   __rte_unused struct
				   ecore_queue_state_params *params)
{
	/* Not implemented yet. */
	return -1;
}

static int ecore_q_send_halt(struct bnx2x_softc *sc,
			     struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;

	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
	ecore_dma_addr_t data_mapping = 0;
	data_mapping = (ecore_dma_addr_t) o->cl_id;

	return ecore_sp_post(sc,
			     RAMROD_CMD_ID_ETH_HALT,
			     o->cids[ECORE_PRIMARY_CID_INDEX],
			     data_mapping, ETH_CONNECTION_TYPE);
}

static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
				struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	uint8_t cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
			    o->cl_id, cid_idx);
		return ECORE_INVAL;
	}

	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
}

static int ecore_q_send_terminate(struct bnx2x_softc *sc,
				  struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	uint8_t cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
			    o->cl_id, cid_index);
		return ECORE_INVAL;
	}

	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
}

static int ecore_q_send_empty(struct bnx2x_softc *sc,
			      struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;

	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
			     ETH_CONNECTION_TYPE);
}

static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc,
				    struct ecore_queue_state_params *params)
{
	switch (params->cmd) {
	case ECORE_Q_CMD_INIT:
		return ecore_q_init(sc, params);
	case ECORE_Q_CMD_SETUP_TX_ONLY:
		return ecore_q_send_setup_tx_only(sc, params);
	case ECORE_Q_CMD_DEACTIVATE:
		return ecore_q_send_deactivate(sc, params);
	case ECORE_Q_CMD_ACTIVATE:
		return ecore_q_send_activate(sc, params);
	case ECORE_Q_CMD_UPDATE:
		return ecore_q_send_update(sc, params);
	case ECORE_Q_CMD_UPDATE_TPA:
		return ecore_q_send_update_tpa(sc, params);
	case ECORE_Q_CMD_HALT:
		return ecore_q_send_halt(sc, params);
	case ECORE_Q_CMD_CFC_DEL:
		return ecore_q_send_cfc_del(sc, params);
	case ECORE_Q_CMD_TERMINATE:
		return ecore_q_send_terminate(sc, params);
	case ECORE_Q_CMD_EMPTY:
		return ecore_q_send_empty(sc, params);
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
		return ECORE_INVAL;
	}
}

static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
				    struct ecore_queue_state_params *params)
{
	switch (params->cmd) {
	case ECORE_Q_CMD_SETUP:
		return ecore_q_send_setup_e1x(sc, params);
	case ECORE_Q_CMD_INIT:
	case ECORE_Q_CMD_SETUP_TX_ONLY:
	case ECORE_Q_CMD_DEACTIVATE:
	case ECORE_Q_CMD_ACTIVATE:
	case ECORE_Q_CMD_UPDATE:
	case ECORE_Q_CMD_UPDATE_TPA:
	case ECORE_Q_CMD_HALT:
	case ECORE_Q_CMD_CFC_DEL:
	case ECORE_Q_CMD_TERMINATE:
	case ECORE_Q_CMD_EMPTY:
		return ecore_queue_send_cmd_cmn(sc, params);
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
		return ECORE_INVAL;
	}
}

static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
				   struct ecore_queue_state_params *params)
{
	switch (params->cmd) {
	case ECORE_Q_CMD_SETUP:
		return ecore_q_send_setup_e2(sc, params);
	case ECORE_Q_CMD_INIT:
	case ECORE_Q_CMD_SETUP_TX_ONLY:
	case ECORE_Q_CMD_DEACTIVATE:
	case ECORE_Q_CMD_ACTIVATE:
	case ECORE_Q_CMD_UPDATE:
	case ECORE_Q_CMD_UPDATE_TPA:
	case ECORE_Q_CMD_HALT:
	case ECORE_Q_CMD_CFC_DEL:
	case ECORE_Q_CMD_TERMINATE:
	case ECORE_Q_CMD_EMPTY:
		return ecore_queue_send_cmd_cmn(sc, params);
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
		return ECORE_INVAL;
	}
}

/**
 * ecore_queue_chk_transition - check state machine of a regular Queue
 *
 * @sc: device handle
 * @o: queue state object
 * @params: queue state parameters
 *
 * (not under the queue_sp_obj->lock!)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * ECORE_INVAL otherwise.
 */
static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
				      struct ecore_queue_sp_obj *o,
				      struct ecore_queue_state_params *params)
{
	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
	enum ecore_queue_cmd cmd = params->cmd;
	struct ecore_queue_update_params *update_params =
	    &params->params.update;
	uint8_t next_tx_only = o->num_tx_only;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_Q_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
			    o->pending);
		return ECORE_BUSY;
	}

	switch (state) {
	case ECORE_Q_STATE_RESET:
		if (cmd == ECORE_Q_CMD_INIT)
			next_state = ECORE_Q_STATE_INITIALIZED;

		break;
	case ECORE_Q_STATE_INITIALIZED:
		if (cmd == ECORE_Q_CMD_SETUP) {
			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
					   &params->params.setup.flags))
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_ACTIVE:
		if (cmd == ECORE_Q_CMD_DEACTIVATE)
			next_state = ECORE_Q_STATE_INACTIVE;

		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_ACTIVE;

		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_ACTIVE;
		}

		break;
	case ECORE_Q_STATE_MULTI_COS:
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_MCOS_TERMINATED;

		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_MULTI_COS;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_MCOS_TERMINATED:
		if (cmd == ECORE_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_INACTIVE:
		if (cmd == ECORE_Q_CMD_ACTIVATE)
			next_state = ECORE_Q_STATE_ACTIVE;

		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_INACTIVE;

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					   &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = ECORE_Q_STATE_ACTIVE;
				else	/* tx only queues exist for this queue */
					next_state = ECORE_Q_STATE_MULTI_COS;
			} else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_STOPPED:
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_TERMINATED;

		break;
	case ECORE_Q_STATE_TERMINATED:
		if (cmd == ECORE_Q_CMD_CFC_DEL)
			next_state = ECORE_Q_STATE_RESET;

		break;
	default:
		PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_Q_STATE_MAX) {
		ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
			  state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);

	return ECORE_INVAL;
}
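
/* Illustrative sketch (not part of the driver): the regular Queue state
 * machine accepts INIT only from RESET and SETUP only from INITIALIZED,
 * so a fresh queue is typically brought up with the sequence below. The
 * queue object pointer is hypothetical, the remaining setup parameters
 * are omitted, and error handling is trimmed for brevity.
 */
#if 0
static int example_bring_up_queue(struct bnx2x_softc *sc,
				  struct ecore_queue_sp_obj *q_obj)
{
	struct ecore_queue_state_params params = { 0 };
	int rc;

	params.q_obj = q_obj;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	params.cmd = ECORE_Q_CMD_INIT;	/* RESET -> INITIALIZED */
	rc = ecore_queue_state_change(sc, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	params.cmd = ECORE_Q_CMD_SETUP;	/* INITIALIZED -> (IN)ACTIVE */
	ECORE_SET_BIT(ECORE_Q_FLG_ACTIVE, &params.params.setup.flags);
	return ecore_queue_state_change(sc, &params);
}
#endif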

/**
 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
 *
 * @sc: device handle
 * @o: queue state object
 * @params: queue state parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * ECORE_INVAL otherwise.
 */
static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
					  struct ecore_queue_sp_obj *o,
					  struct ecore_queue_state_params
					  *params)
{
	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
	enum ecore_queue_cmd cmd = params->cmd;

	switch (state) {
	case ECORE_Q_STATE_RESET:
		if (cmd == ECORE_Q_CMD_INIT)
			next_state = ECORE_Q_STATE_INITIALIZED;

		break;
	case ECORE_Q_STATE_INITIALIZED:
		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
					   &params->params.tx_only.flags))
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_ACTIVE:
	case ECORE_Q_STATE_INACTIVE:
		if (cmd == ECORE_Q_CMD_CFC_DEL)
			next_state = ECORE_Q_STATE_RESET;

		break;
	default:
		PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_Q_STATE_MAX) {
		ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
			  state, cmd, next_state);
		o->next_state = next_state;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);

	return ECORE_INVAL;
}

void ecore_init_queue_obj(struct bnx2x_softc *sc,
			  struct ecore_queue_sp_obj *obj,
			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt,
			  uint8_t func_id, void *rdata,
			  ecore_dma_addr_t rdata_mapping, unsigned long type)
{
	ECORE_MEMSET(obj, 0, sizeof(*obj));

	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);

	rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = ECORE_Q_STATE_MAX;

	if (CHIP_IS_E1x(sc))
		obj->send_cmd = ecore_queue_send_cmd_e1x;
	else
		obj->send_cmd = ecore_queue_send_cmd_e2;

	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
		obj->check_transition = ecore_queue_chk_fwd_transition;
	else
		obj->check_transition = ecore_queue_chk_transition;

	obj->complete_cmd = ecore_queue_comp_cmd;
	obj->wait_comp = ecore_queue_wait_comp;
	obj->set_pending = ecore_queue_set_pending;
}
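
/* Illustrative sketch (not part of the driver): wiring up a queue object
 * for a single-CoS queue. The CID, client id, function id, DMA-able
 * buffer and the ECORE_Q_TYPE_HAS_RX/HAS_TX flag names are assumptions
 * used only as placeholders.
 */
#if 0
static void example_init_queue(struct bnx2x_softc *sc,
			       struct ecore_queue_sp_obj *q_obj,
			       void *ramrod_data,
			       ecore_dma_addr_t ramrod_data_mapping)
{
	uint32_t cids[1] = { 17 };	/* hypothetical CID */
	unsigned long q_type = 0;

	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_RX, &q_type);
	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_TX, &q_type);

	ecore_init_queue_obj(sc, q_obj, 0 /* cl_id */, cids, 1 /* cid_cnt */,
			     0 /* func_id */, ramrod_data,
			     ramrod_data_mapping, q_type);
}
#endif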

/********************** Function state object *********************************/
enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
					   struct ecore_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return ECORE_F_STATE_MAX;

	/* ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}

static int ecore_func_wait_comp(struct bnx2x_softc *sc,
				struct ecore_func_sp_obj *o,
				enum ecore_func_cmd cmd)
{
	return ecore_state_wait(sc, cmd, &o->pending);
}

/**
 * ecore_func_state_change_comp - complete the state machine transition
 *
 * @sc: device handle
 * @o: function state object
 * @cmd: command
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static int
ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
			     struct ecore_func_sp_obj *o,
			     enum ecore_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
		PMD_DRV_LOG(ERR, sc,
			    "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
			    cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
			    o->next_state);
		return ECORE_INVAL;
	}

	ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
		  cmd, ECORE_FUNC_ID(sc), o->next_state);

	o->state = o->next_state;
	o->next_state = ECORE_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	ECORE_CLEAR_BIT(cmd, &o->pending);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}

/**
 * ecore_func_comp_cmd - complete the state change command
 *
 * @sc: device handle
 * @o: function state object
 * @cmd: command
 *
 * Checks that the arrived completion is expected.
 */
static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
			       struct ecore_func_sp_obj *o,
			       enum ecore_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = ecore_func_state_change_comp(sc, o, cmd);
	return rc;
}

/**
 * ecore_func_chk_transition - perform function state machine transition
 *
 * @sc: device handle
 * @o: function state object
 * @params: function state parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * ECORE_INVAL otherwise.
 */
static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
				     struct ecore_func_sp_obj *o,
				     struct ecore_func_state_params *params)
{
	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
	enum ecore_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return ECORE_BUSY;

	switch (state) {
	case ECORE_F_STATE_RESET:
		if (cmd == ECORE_F_CMD_HW_INIT)
			next_state = ECORE_F_STATE_INITIALIZED;

		break;
	case ECORE_F_STATE_INITIALIZED:
		if (cmd == ECORE_F_CMD_START)
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_HW_RESET)
			next_state = ECORE_F_STATE_RESET;

		break;
	case ECORE_F_STATE_STARTED:
		if (cmd == ECORE_F_CMD_STOP)
			next_state = ECORE_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion;
		 * for these events - next state remains STARTED.
		 */
		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_TX_STOP)
			next_state = ECORE_F_STATE_TX_STOPPED;

		break;
	case ECORE_F_STATE_TX_STOPPED:
		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_TX_STOPPED;

		else if (cmd == ECORE_F_CMD_TX_START)
			next_state = ECORE_F_STATE_STARTED;

		break;
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_F_STATE_MAX) {
		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
			  state, cmd, next_state);
		o->next_state = next_state;
		return ECORE_SUCCESS;
	}

	PMD_DRV_LOG(ERR, sc,
		    "Bad function state transition request: %d %d", state, cmd);

	return ECORE_INVAL;
}

/**
 * ecore_func_init_func - performs HW init at function stage
 *
 * @sc: device handle
 * @drv: driver-specific init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static int ecore_func_init_func(struct bnx2x_softc *sc,
				const struct ecore_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(sc);
}

/**
 * ecore_func_init_port - performs HW init at port stage
 *
 * @sc: device handle
 * @drv: driver-specific init ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static int ecore_func_init_port(struct bnx2x_softc *sc,
				const struct ecore_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(sc);
	if (rc)
		return rc;

	return ecore_func_init_func(sc, drv);
}
4894 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4896 * @sc: device handle
4899 * Init HW when the current phase is
4900 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4901 * PORT-only and FUNCTION-only HW blocks.
4903 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4906 int rc = drv->init_hw_cmn_chip(sc);
4910 return ecore_func_init_port(sc, drv);
4914 * ecore_func_init_cmn - performs HW init at common stage
4916 * @sc: device handle
4919 * Init HW when the current phase is
4920 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
4921 * PORT-only and FUNCTION-only HW blocks.
4923 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4924 const struct ecore_func_sp_drv_ops *drv)
4926 int rc = drv->init_hw_cmn(sc);
4930 return ecore_func_init_port(sc, drv);

static int ecore_func_hw_init(struct bnx2x_softc *sc,
			      struct ecore_func_state_params *params)
{
	uint32_t load_code = params->params.hw_init.load_phase;
	struct ecore_func_sp_obj *o = params->f_obj;
	const struct ecore_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	ECORE_MSG(sc, "function %d load_code %x",
		  ECORE_ABS_FUNC_ID(sc), load_code);

	/* Prepare FW */
	rc = drv->init_fw(sc);
	if (rc) {
		PMD_DRV_LOG(ERR, sc, "Error loading firmware");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = ecore_func_init_cmn_chip(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = ecore_func_init_cmn(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = ecore_func_init_port(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = ecore_func_init_func(sc, drv);
		if (rc)
			goto init_err;

		break;
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
			    load_code);
		rc = ECORE_INVAL;
	}

init_err:
	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (rc == ECORE_SUCCESS)
		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);

	return rc;
}
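
/* Illustrative sketch (not part of the driver): HW init is requested
 * through the function state machine rather than by calling the init
 * helpers directly. The load phase is one of the FW_MSG_CODE_DRV_LOAD_*
 * values returned by the MCP; the function object pointer is
 * hypothetical.
 */
#if 0
static int example_func_hw_init(struct bnx2x_softc *sc,
				struct ecore_func_sp_obj *f_obj,
				uint32_t load_phase)
{
	struct ecore_func_state_params params = { 0 };

	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_HW_INIT;
	params.params.hw_init.load_phase = load_phase;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return ecore_func_state_change(sc, &params);
}
#endif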

/**
 * ecore_func_reset_func - reset HW at function stage
 *
 * @sc: device handle
 * @drv: driver-specific init ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static void ecore_func_reset_func(struct bnx2x_softc *sc,
				  const struct ecore_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(sc);
}

/**
 * ecore_func_reset_port - reset HW at port stage
 *
 * @sc: device handle
 * @drv: driver-specific init ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable(), thus disabling PGLUE_B, which
 * makes any DMAE transactions impossible.
 */
static void ecore_func_reset_port(struct bnx2x_softc *sc,
				  const struct ecore_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(sc);
	ecore_func_reset_func(sc, drv);
}

/**
 * ecore_func_reset_cmn - reset HW at common stage
 *
 * @sc: device handle
 * @drv: driver-specific init ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
				 const struct ecore_func_sp_drv_ops *drv)
{
	ecore_func_reset_port(sc, drv);
	drv->reset_hw_cmn(sc);
}

static int ecore_func_hw_reset(struct bnx2x_softc *sc,
			       struct ecore_func_state_params *params)
{
	uint32_t reset_phase = params->params.hw_reset.reset_phase;
	struct ecore_func_sp_obj *o = params->f_obj;
	const struct ecore_func_sp_drv_ops *drv = o->drv;

	ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
		  reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		ecore_func_reset_cmn(sc, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		ecore_func_reset_port(sc, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		ecore_func_reset_func(sc, drv);
		break;
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
			    reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);

	return ECORE_SUCCESS;
}

static int ecore_func_send_start(struct bnx2x_softc *sc,
				 struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
	    (struct function_start_data *)o->rdata;
	ecore_dma_addr_t data_mapping = o->rdata_mapping;
	struct ecore_func_start_params *start_params = &params->params.start;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = (uint8_t) start_params->mf_mode;
	rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
	rdata->path_id = ECORE_PATH_ID(sc);
	rdata->network_cos_mode = start_params->network_cos_mode;
	rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
	rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;

	/* No explicit memory barrier is needed here: the ordering of the
	 * SPQ element write and the SPQ producer update (which involves a
	 * memory read) is enforced by the full memory barrier inside
	 * ecore_sp_post().
	 */
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     data_mapping, NONE_CONNECTION_TYPE);
}

static int ecore_func_send_switch_update(struct bnx2x_softc *sc,
					 struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
	    (struct function_update_data *)o->rdata;
	ecore_dma_addr_t data_mapping = o->rdata_mapping;
	struct ecore_func_switch_update_params *switch_update_params =
	    &params->params.switch_update;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->tx_switch_suspend_change_flg = 1;
	rdata->tx_switch_suspend = switch_update_params->suspend;
	rdata->echo = SWITCH_UPDATE;

	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     data_mapping, NONE_CONNECTION_TYPE);
}
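
/* Illustrative sketch (not part of the driver): suspending Tx switching
 * through the function state machine, which routes the request to
 * ecore_func_send_switch_update() above. The function object pointer is
 * hypothetical.
 */
#if 0
static int example_suspend_tx_switching(struct bnx2x_softc *sc,
					struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params params = { 0 };

	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_SWITCH_UPDATE;
	params.params.switch_update.suspend = 1;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return ecore_func_state_change(sc, &params);
}
#endif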

static int ecore_func_send_afex_update(struct bnx2x_softc *sc,
				       struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
	    (struct function_update_data *)o->afex_rdata;
	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct ecore_func_afex_update_params *afex_update_params =
	    &params->params.afex_update;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
	    ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No explicit memory barrier is needed here: the ordering of the
	 * SPQ element write and the SPQ producer update (which involves a
	 * memory read) is enforced by the full memory barrier inside
	 * ecore_sp_post().
	 */
	ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
		  rdata->vif_id,
		  rdata->afex_default_vlan, rdata->allowed_priorities);

	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     data_mapping, NONE_CONNECTION_TYPE);
}

static inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
						struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
	    (struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct ecore_func_afex_viflists_params *afex_vif_params =
	    &params->params.afex_viflists;
	uint64_t *p_rdata = (uint64_t *)rdata;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index =
	    ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
	rdata->func_bit_map = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/* No explicit memory barrier is needed here: the ordering of the
	 * SPQ element write and the SPQ producer update (which involves a
	 * memory read) is enforced by the full memory barrier inside
	 * ecore_sp_post().
	 */
	ECORE_MSG
	    (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
	     rdata->afex_vif_list_command, rdata->vif_list_index,
	     rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     *p_rdata, NONE_CONNECTION_TYPE);
}

static int ecore_func_send_stop(struct bnx2x_softc *sc,
				__rte_unused struct ecore_func_state_params *params)
{
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static int ecore_func_send_tx_stop(struct bnx2x_softc *sc,
				   __rte_unused struct ecore_func_state_params *params)
{
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static int ecore_func_send_tx_start(struct bnx2x_softc *sc,
				    struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
	    (struct flow_control_configuration *)o->rdata;
	ecore_dma_addr_t data_mapping = o->rdata_mapping;
	struct ecore_func_tx_start_params *tx_start_params =
	    &params->params.tx_start;
	uint32_t i;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
		    tx_start_params->traffic_type_to_priority_cos[i];

	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     data_mapping, NONE_CONNECTION_TYPE);
}

static int ecore_func_send_cmd(struct bnx2x_softc *sc,
			       struct ecore_func_state_params *params)
{
	switch (params->cmd) {
	case ECORE_F_CMD_HW_INIT:
		return ecore_func_hw_init(sc, params);
	case ECORE_F_CMD_START:
		return ecore_func_send_start(sc, params);
	case ECORE_F_CMD_STOP:
		return ecore_func_send_stop(sc, params);
	case ECORE_F_CMD_HW_RESET:
		return ecore_func_hw_reset(sc, params);
	case ECORE_F_CMD_AFEX_UPDATE:
		return ecore_func_send_afex_update(sc, params);
	case ECORE_F_CMD_AFEX_VIFLISTS:
		return ecore_func_send_afex_viflists(sc, params);
	case ECORE_F_CMD_TX_STOP:
		return ecore_func_send_tx_stop(sc, params);
	case ECORE_F_CMD_TX_START:
		return ecore_func_send_tx_start(sc, params);
	case ECORE_F_CMD_SWITCH_UPDATE:
		return ecore_func_send_switch_update(sc, params);
	default:
		PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
		return ECORE_INVAL;
	}
}

void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
			 struct ecore_func_sp_obj *obj,
			 void *rdata, ecore_dma_addr_t rdata_mapping,
			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
			 struct ecore_func_sp_drv_ops *drv_iface)
{
	ECORE_MEMSET(obj, 0, sizeof(*obj));

	ECORE_MUTEX_INIT(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = ecore_func_send_cmd;
	obj->check_transition = ecore_func_chk_transition;
	obj->complete_cmd = ecore_func_comp_cmd;
	obj->wait_comp = ecore_func_wait_comp;
	obj->drv = drv_iface;
}
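
/* Illustrative sketch (not part of the driver): wiring a function state
 * object to driver-provided HW init/reset callbacks. The ops table and
 * DMA-able buffers below are hypothetical placeholders.
 */
#if 0
static struct ecore_func_sp_drv_ops example_drv_ops = {
	/* .init_hw_cmn_chip, .init_hw_cmn, .init_hw_port, .init_hw_func,
	 * .reset_hw_cmn, .reset_hw_port, .reset_hw_func, .init_fw, ...
	 * would be filled with driver callbacks here.
	 */
};

static void example_init_func_obj(struct bnx2x_softc *sc,
				  struct ecore_func_sp_obj *f_obj,
				  void *rdata, ecore_dma_addr_t rdata_map,
				  void *afex_rdata,
				  ecore_dma_addr_t afex_rdata_map)
{
	ecore_init_func_obj(sc, f_obj, rdata, rdata_map,
			    afex_rdata, afex_rdata_map, &example_drv_ops);
}
#endif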

/**
 * ecore_func_state_change - perform Function state change transition
 *
 * @sc: device handle
 * @params: parameters to perform the transaction
 *
 * returns 0 in case of a successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int ecore_func_state_change(struct bnx2x_softc *sc,
			    struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum ecore_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	ECORE_MUTEX_LOCK(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if ((rc == ECORE_BUSY) &&
	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			ECORE_MSLEEP(10);
			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
			rc = o->check_transition(sc, o, params);
		}
		if (rc == ECORE_BUSY) {
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			PMD_DRV_LOG(ERR, sc,
				    "timeout waiting for previous ramrod completion");
			return rc;
		}
	} else if (rc) {
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	ECORE_SET_BIT(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		ecore_func_state_change_comp(sc, o, cmd);
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);

		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);

		if (rc) {
			o->next_state = ECORE_F_STATE_MAX;
			ECORE_CLEAR_BIT(cmd, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(sc, o, cmd);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	return ECORE_RET_PENDING(cmd, pending);
}
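
/* Illustrative sketch (not part of the driver): a caller that may race
 * with a previous function-level ramrod can set RAMROD_RETRY so that the
 * transition check above is retried (up to roughly 3 seconds at 10 ms per
 * attempt) instead of failing immediately with ECORE_BUSY. The function
 * object pointer is hypothetical.
 */
#if 0
static int example_func_start_with_retry(struct bnx2x_softc *sc,
					 struct ecore_func_sp_obj *f_obj,
					 struct ecore_func_start_params *start)
{
	struct ecore_func_state_params params = { 0 };

	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_START;
	params.params.start = *start;
	ECORE_SET_BIT(RAMROD_RETRY, &params.ramrod_flags);
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return ecore_func_state_change(sc, &params);
}
#endif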

/******************************************************************************
 * Description:
 *	   Calculates crc 8 on a word value: polynomial 0-1-2-8
 *	   (x^8 + x^2 + x^1 + 1). Code was translated from Verilog.
 *****************************************************************************/
uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
{
	uint8_t D[32];
	uint8_t NewCRC[8];
	uint8_t C[8];
	uint8_t crc_res;
	uint8_t i;

	/* split the data into 32 bits */
	for (i = 0; i < 32; i++) {
		D[i] = (uint8_t) (data & 1);
		data = data >> 1;
	}

	/* split the crc into 8 bits */
	for (i = 0; i < 8; i++) {
		C[i] = crc & 1;
		crc = crc >> 1;
	}

	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
	    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
	    C[6] ^ C[7];
	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
	    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
	    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
	    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
	    C[0] ^ C[1] ^ C[4] ^ C[5];
	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
	    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
	    C[1] ^ C[2] ^ C[5] ^ C[6];
	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
	    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
	    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
	    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
	    C[3] ^ C[4] ^ C[7];
	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
	    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
	    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];

	crc_res = 0;
	for (i = 0; i < 8; i++) {
		crc_res |= (NewCRC[i] << i);
	}

	return crc_res;
}
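
/* Illustrative sketch (not part of the driver): feeding two 32-bit words
 * through ecore_calc_crc8(), chaining the previous result as the crc-in
 * of the next word. The input values are arbitrary.
 */
#if 0
static uint8_t example_crc8_two_words(void)
{
	uint8_t crc = 0;

	crc = ecore_calc_crc8(0x12345678, crc);
	crc = ecore_calc_crc8(0x9abcdef0, crc);

	return crc;
}
#endif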

uint32_t
ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
{
	uint32_t i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
	}

	return crc;
}
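
/* Illustrative sketch (not part of the driver): ecore_calc_crc32() is a
 * generic bitwise, LSB-first CRC; the caller supplies the reflected
 * polynomial as `magic'. The 0xedb88320 value below is the standard
 * reflected CRC-32 polynomial and the 0xffffffff seed is only an example
 * choice, not necessarily what this driver passes.
 */
#if 0
static uint32_t example_crc32(const uint8_t *buf, uint32_t len)
{
	return ecore_calc_crc32(0xffffffff, buf, len, 0xedb88320);
}
#endif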