1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2007-2013 Broadcom Corporation.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9 * Copyright (c) 2015-2018 Cavium Inc.
10 * All rights reserved.
15 #include "ecore_init.h"
17 /**** Exe Queue interfaces ****/
20 * ecore_exe_queue_init - init the Exe Queue object
22 * @o: pointer to the object
24 * @owner: pointer to the owner
25 * @validate: validate function pointer
26 * @optimize: optimize function pointer
27 * @exec: execute function pointer
28 * @get: get function pointer
31 ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
32 struct ecore_exe_queue_obj *o,
34 union ecore_qable_obj *owner,
35 exe_q_validate validate,
37 exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
39 ECORE_MEMSET(o, 0, sizeof(*o));
41 ECORE_LIST_INIT(&o->exe_queue);
42 ECORE_LIST_INIT(&o->pending_comp);
44 ECORE_SPIN_LOCK_INIT(&o->lock, sc);
46 o->exe_chunk_len = exe_len;
49 /* Owner specific callbacks */
50 o->validate = validate;
52 o->optimize = optimize;
56 ECORE_MSG("Setup the execution queue with the chunk length of %d",
60 static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
61 struct ecore_exeq_elem *elem)
63 ECORE_MSG("Deleting an exe_queue element");
64 ECORE_FREE(sc, elem, sizeof(*elem));
67 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
69 struct ecore_exeq_elem *elem;
72 ECORE_SPIN_LOCK_BH(&o->lock);
74 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
75 struct ecore_exeq_elem) cnt++;
77 ECORE_SPIN_UNLOCK_BH(&o->lock);
83 * ecore_exe_queue_add - add a new element to the execution queue
87 * @cmd: new command to add
88 * @restore: true - do not optimize the command
90 * If the element is optimized or is illegal, frees it.
92 static int ecore_exe_queue_add(struct bnx2x_softc *sc,
93 struct ecore_exe_queue_obj *o,
94 struct ecore_exeq_elem *elem, int restore)
98 ECORE_SPIN_LOCK_BH(&o->lock);
101 /* Try to cancel this element queue */
102 rc = o->optimize(sc, o->owner, elem);
106 /* Check if this request is ok */
107 rc = o->validate(sc, o->owner, elem);
109 ECORE_MSG("Preamble failed: %d", rc);
114 /* If so, add it to the execution queue */
115 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
117 ECORE_SPIN_UNLOCK_BH(&o->lock);
119 return ECORE_SUCCESS;
122 ecore_exe_queue_free_elem(sc, elem);
124 ECORE_SPIN_UNLOCK_BH(&o->lock);
129 static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj
132 struct ecore_exeq_elem *elem;
134 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
135 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
136 struct ecore_exeq_elem, link);
138 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
139 ecore_exe_queue_free_elem(sc, elem);
143 static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
144 struct ecore_exe_queue_obj *o)
146 ECORE_SPIN_LOCK_BH(&o->lock);
148 __ecore_exe_queue_reset_pending(sc, o);
150 ECORE_SPIN_UNLOCK_BH(&o->lock);
154 * ecore_exe_queue_step - execute one execution chunk atomically
158 * @ramrod_flags: flags
160 * (Should be called while holding the exe_queue->lock).
162 static int ecore_exe_queue_step(struct bnx2x_softc *sc,
163 struct ecore_exe_queue_obj *o,
164 unsigned long *ramrod_flags)
166 struct ecore_exeq_elem *elem, spacer;
169 ECORE_MEMSET(&spacer, 0, sizeof(spacer));
171 /* Next step should not be performed until the current is finished,
172 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
173 * properly clear object internals without sending any command to the FW
174 * which also implies there won't be any completion to clear the pending_comp list. */
177 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
178 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
180 ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
181 __ecore_exe_queue_reset_pending(sc, o);
183 return ECORE_PENDING;
187 /* Run through the pending commands list and create the next execution chunk. */
190 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
191 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
192 struct ecore_exeq_elem, link);
193 ECORE_DBG_BREAK_IF(!elem->cmd_len);
195 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
196 cur_len += elem->cmd_len;
197 /* Prevent both lists from being empty when moving an
198 * element. This will allow the call of
199 * ecore_exe_queue_empty() without locking.
201 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
203 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
204 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
205 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
212 return ECORE_SUCCESS;
214 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
216 /* In case of an error return the commands back to the queue
217 * and reset the pending_comp.
219 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
221 /* If zero is returned, it means there are no outstanding pending
222 * completions and we may dismiss the pending list.
224 __ecore_exe_queue_reset_pending(sc, o);
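/* A minimal sketch (not driver code) of the chunking rule applied by
 * ecore_exe_queue_step() above: commands are taken from the head of the
 * queue while their accumulated length stays within the chunk limit,
 * mirroring how elements migrate from exe_queue to pending_comp.  All
 * example_* names below are hypothetical.
 */
#include <stddef.h>

static size_t example_build_chunk(const int *cmd_len, size_t n_cmds,
				  int chunk_limit)
{
	size_t i;
	int cur_len = 0;

	for (i = 0; i < n_cmds; i++) {
		if (cur_len + cmd_len[i] > chunk_limit)
			break;		/* next command would not fit - stop */
		cur_len += cmd_len[i];	/* "move" command i into the chunk */
	}

	return i;	/* number of commands forming one execution chunk */
}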
229 static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
231 int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
233 /* Don't reorder!!! */
236 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
239 static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
243 ECORE_MSG("Allocating a new exe_queue element");
244 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
247 /************************ raw_obj functions ***********************************/
248 static int ecore_raw_check_pending(struct ecore_raw_obj *o)
251 * !! converts the value returned by ECORE_TEST_BIT such that it
252 * is guaranteed not to be truncated regardless of int definition.
254 * Note we cannot simply define the function's return value type
255 * to match the type returned by ECORE_TEST_BIT, as it varies by
256 * platform/implementation.
259 return !!ECORE_TEST_BIT(o->state, o->pstate);
262 static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
264 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
265 ECORE_CLEAR_BIT(o->state, o->pstate);
266 ECORE_SMP_MB_AFTER_CLEAR_BIT();
269 static void ecore_raw_set_pending(struct ecore_raw_obj *o)
271 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
272 ECORE_SET_BIT(o->state, o->pstate);
273 ECORE_SMP_MB_AFTER_CLEAR_BIT();
277 * ecore_state_wait - wait until the given bit(state) is cleared
280 * @state: state which is to be cleared
281 * @state_p: state buffer
284 static int ecore_state_wait(struct bnx2x_softc *sc, int state,
285 unsigned long *pstate)
287 /* can take a while if any port is running */
290 if (CHIP_REV_IS_EMUL(sc))
293 ECORE_MSG("waiting for state to become %d", state);
297 bnx2x_intr_legacy(sc, 1);
298 if (!ECORE_TEST_BIT(state, pstate)) {
299 #ifdef ECORE_STOP_ON_ERROR
300 ECORE_MSG("exit (cnt %d)", 5000 - cnt);
302 return ECORE_SUCCESS;
305 ECORE_WAIT(sc, delay_us);
312 PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
313 #ifdef ECORE_STOP_ON_ERROR
317 return ECORE_TIMEOUT;
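/* A minimal sketch (not driver code) of the polling pattern used by
 * ecore_state_wait() above: spin on a state bit for a bounded number of
 * iterations with a fixed delay between checks, and report a timeout if the
 * bit never clears.  example_* names and the delay callback are hypothetical
 * stand-ins for the ECORE_* macros.
 */
#include <limits.h>

static int example_wait_bit_clear(int bit, const unsigned long *pstate,
				  int max_iter, void (*delay_us)(int us))
{
	int cnt;
	const int bits_per_word = (int)(sizeof(unsigned long) * CHAR_BIT);

	for (cnt = 0; cnt < max_iter; cnt++) {
		if (!((pstate[bit / bits_per_word] >> (bit % bits_per_word)) & 1UL))
			return 0;	/* state bit cleared - done */
		delay_us(1000);		/* back off before re-checking */
	}

	return -1;			/* timed out, analogous to ECORE_TIMEOUT */
}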
320 static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
322 return ecore_state_wait(sc, raw->state, raw->pstate);
325 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
326 /* credit handling callbacks */
327 static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
329 struct ecore_credit_pool_obj *mp = o->macs_pool;
331 ECORE_DBG_BREAK_IF(!mp);
333 return mp->get_entry(mp, offset);
336 static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
338 struct ecore_credit_pool_obj *mp = o->macs_pool;
340 ECORE_DBG_BREAK_IF(!mp);
342 return mp->get(mp, 1);
345 static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
347 struct ecore_credit_pool_obj *mp = o->macs_pool;
349 return mp->put_entry(mp, offset);
352 static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
354 struct ecore_credit_pool_obj *mp = o->macs_pool;
356 return mp->put(mp, 1);
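/* A minimal sketch (not driver code) of the credit pool contract relied on
 * by the callbacks above: get() reserves n credits and fails when the pool
 * (i.e. the CAM space) is exhausted, put() returns them.  The real
 * ecore_credit_pool_obj additionally hands out specific CAM offsets through
 * get_entry()/put_entry().  example_* names are hypothetical.
 */
#include <stdbool.h>

struct example_credit_pool {
	int credits;			/* credits currently available */
};

static bool example_credit_get(struct example_credit_pool *p, int n)
{
	if (p->credits < n)
		return false;		/* not enough room left */
	p->credits -= n;
	return true;
}

static void example_credit_put(struct example_credit_pool *p, int n)
{
	p->credits += n;
}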
360 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on the vlan mac head list
364 * @o: vlan_mac object
366 * @details: Non-blocking implementation; should be called under the execution queue lock.
369 static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
370 struct ecore_vlan_mac_obj *o)
372 if (o->head_reader) {
373 ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
377 ECORE_MSG("vlan_mac_lock writer - Taken");
378 return ECORE_SUCCESS;
382 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
383 * which wasn't able to run due to a taken lock on vlan mac head list.
386 * @o: vlan_mac object
388 * @details Should be called under execution queue lock; notice it might release
389 * and reclaim it during its run.
391 static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
392 struct ecore_vlan_mac_obj *o)
395 unsigned long ramrod_flags = o->saved_ramrod_flags;
397 ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
399 o->head_exe_request = FALSE;
400 o->saved_ramrod_flags = 0;
401 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
402 if (rc != ECORE_SUCCESS) {
404 "execution of pending commands failed with rc %d",
406 #ifdef ECORE_STOP_ON_ERROR
413 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
414 * called due to vlan mac head list lock being taken.
417 * @o: vlan_mac object
418 * @ramrod_flags: ramrod flags of missed execution
420 * @details Should be called under execution queue lock.
422 static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
423 struct ecore_vlan_mac_obj *o,
424 unsigned long ramrod_flags)
426 o->head_exe_request = TRUE;
427 o->saved_ramrod_flags = ramrod_flags;
428 ECORE_MSG("Placing pending execution with ramrod flags %lu",
433 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
436 * @o: vlan_mac object
438 * @details Should be called under execution queue lock. Notice if a pending
439 * execution exists, it would perform it - possibly releasing and
440 * reclaiming the execution queue lock.
442 static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
443 struct ecore_vlan_mac_obj *o)
445 /* It's possible a new pending execution was added since this writer
446 * executed. If so, execute again. [Ad infinitum]
448 while (o->head_exe_request) {
450 ("vlan_mac_lock - writer release encountered a pending request");
451 __ecore_vlan_mac_h_exec_pending(sc, o);
456 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
459 * @o: vlan_mac object
461 * @details Notice if a pending execution exists, it would perform it -
462 * possibly releasing and reclaiming the execution queue lock.
464 void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
465 struct ecore_vlan_mac_obj *o)
467 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
468 __ecore_vlan_mac_h_write_unlock(sc, o);
469 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
473 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
476 * @o: vlan_mac object
478 * @details Should be called under the execution queue lock. May sleep. May
479 * release and reclaim execution queue lock during its run.
481 static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
482 struct ecore_vlan_mac_obj *o)
484 /* If we got here, we're holding lock --> no WRITER exists */
486 ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);
488 return ECORE_SUCCESS;
492 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
495 * @o: vlan_mac object
497 * @details May sleep. Claims and releases execution queue lock during its run.
499 static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
500 struct ecore_vlan_mac_obj *o)
504 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
505 rc = __ecore_vlan_mac_h_read_lock(sc, o);
506 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
512 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
515 * @o: vlan_mac object
517 * @details Should be called under execution queue lock. Notice if a pending
518 * execution exists, it would be performed if this was the last
519 * reader, possibly releasing and reclaiming the execution queue lock.
521 static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
522 struct ecore_vlan_mac_obj *o)
524 if (!o->head_reader) {
526 "Need to release vlan mac reader lock, but lock isn't taken");
527 #ifdef ECORE_STOP_ON_ERROR
533 "vlan_mac_lock - decreased readers to %d",
537 /* It's possible a new pending execution was added, and that this reader
538 * was last - if so we need to execute the command.
540 if (!o->head_reader && o->head_exe_request) {
542 "vlan_mac_lock - reader release encountered a pending request");
544 /* Writer release will do the trick */
545 __ecore_vlan_mac_h_write_unlock(sc, o);
550 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
553 * @o: vlan_mac object
555 * @details Notice if a pending execution exists, it would be performed if this
556 * was the last reader. Claims and releases the execution queue lock
559 void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
560 struct ecore_vlan_mac_obj *o)
562 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
563 __ecore_vlan_mac_h_read_unlock(sc, o);
564 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
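/* A minimal sketch (not driver code) of the head list locking scheme
 * implemented above, reduced to its two state fields: a reader count
 * (o->head_reader) and a "missed execution" flag (o->head_exe_request).
 * A writer only proceeds when no readers are present; otherwise the step is
 * pended and replayed when the last reader, or the writer itself, lets go.
 * Everything here is assumed to run under the execution queue lock, exactly
 * as the functions above require.  example_* names are hypothetical.
 */
#include <stdbool.h>

struct example_head_lock {
	int reader_count;	/* mirrors o->head_reader */
	bool exe_pending;	/* mirrors o->head_exe_request */
};

static bool example_write_trylock(struct example_head_lock *l)
{
	return l->reader_count == 0;	/* writer must yield to readers */
}

static void example_read_lock(struct example_head_lock *l)
{
	l->reader_count++;
}

/* Returns true when the caller, as the last reader, must now replay the
 * execution step a writer had to pend earlier.
 */
static bool example_read_unlock(struct example_head_lock *l)
{
	l->reader_count--;
	return l->reader_count == 0 && l->exe_pending;
}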
568 * ecore_get_n_elements - get n elements from the vlan mac head list
571 * @o: vlan_mac object
572 * @n: number of elements to get
573 * @base: base address for element placement
574 * @stride: stride between elements (in bytes)
576 static int ecore_get_n_elements(struct bnx2x_softc *sc,
577 struct ecore_vlan_mac_obj *o, int n,
578 uint8_t * base, uint8_t stride, uint8_t size)
580 struct ecore_vlan_mac_registry_elem *pos;
581 uint8_t *next = base;
582 int counter = 0, read_lock;
584 ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
585 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
586 if (read_lock != ECORE_SUCCESS)
588 "get_n_elements failed to get vlan mac reader lock; Access without lock");
591 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
592 struct ecore_vlan_mac_registry_elem) {
594 ECORE_MEMCPY(next, &pos->u, size);
597 ("copied element number %d to address %p element was:",
599 next += stride + size;
603 if (read_lock == ECORE_SUCCESS) {
604 ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
605 ecore_vlan_mac_h_read_unlock(sc, o);
608 return counter * ETH_ALEN;
611 /* check_add() callbacks */
612 static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
613 struct ecore_vlan_mac_obj *o,
614 union ecore_classification_ramrod_data *data)
616 struct ecore_vlan_mac_registry_elem *pos;
618 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
619 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
620 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
622 if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
625 /* Check if a requested MAC already exists */
626 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
627 struct ecore_vlan_mac_registry_elem)
628 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
629 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
632 return ECORE_SUCCESS;
635 /* check_del() callbacks */
636 static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc
642 ecore_classification_ramrod_data
645 struct ecore_vlan_mac_registry_elem *pos;
647 ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
648 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
649 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
651 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
652 struct ecore_vlan_mac_registry_elem)
653 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
654 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
660 /* check_move() callback */
661 static int ecore_check_move(struct bnx2x_softc *sc,
662 struct ecore_vlan_mac_obj *src_o,
663 struct ecore_vlan_mac_obj *dst_o,
664 union ecore_classification_ramrod_data *data)
666 struct ecore_vlan_mac_registry_elem *pos;
669 /* Check if we can delete the requested configuration from the first
672 pos = src_o->check_del(sc, src_o, data);
674 /* check if configuration can be added */
675 rc = dst_o->check_add(sc, dst_o, data);
677 /* If this classification can not be added (is already set)
678 * or can't be deleted - return an error.
686 static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
687 __rte_unused struct ecore_vlan_mac_obj
688 *src_o, __rte_unused struct ecore_vlan_mac_obj
689 *dst_o, __rte_unused union
690 ecore_classification_ramrod_data *data)
695 static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj
698 struct ecore_raw_obj *raw = &o->raw;
699 uint8_t rx_tx_flag = 0;
701 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
702 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
703 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
705 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
706 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
707 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
712 static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
713 int add, unsigned char *dev_addr, int index)
716 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
717 NIG_REG_LLH0_FUNC_MEM;
719 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
722 if (index > ECORE_LLH_CAM_MAX_PF_LINE)
725 ECORE_MSG("Going to %s LLH configuration at entry %d",
726 (add ? "ADD" : "DELETE"), index);
729 /* LLH_FUNC_MEM is a uint64_t WB register */
730 reg_offset += 8 * index;
732 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
733 (dev_addr[4] << 8) | dev_addr[5]);
734 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
736 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
739 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
740 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
744 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
747 * @o: queue for which we want to configure this rule
748 * @add: if TRUE the command is an ADD command, DEL otherwise
749 * @opcode: CLASSIFY_RULE_OPCODE_XXX
750 * @hdr: pointer to a header to setup
753 static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
755 struct eth_classify_cmd_header
758 struct ecore_raw_obj *raw = &o->raw;
760 hdr->client_id = raw->cl_id;
761 hdr->func_id = raw->func_id;
763 /* Rx or/and Tx (internal switching) configuration ? */
764 hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);
767 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
769 hdr->cmd_general_data |=
770 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
774 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
776 * @cid: connection id
777 * @type: ECORE_FILTER_XXX_PENDING
778 * @hdr: pointer to header to setup
781 * Currently we always configure one rule and set the echo field to contain a CID and an opcode type.
784 static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header
787 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
788 (type << ECORE_SWCID_SHIFT));
789 hdr->rule_cnt = (uint8_t) rule_cnt;
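/* A minimal sketch (not driver code) of how the echo word built above can be
 * packed and unpacked: the low bits carry the connection id and the high bits
 * the ECORE_FILTER_*_PENDING type.  EXAMPLE_SWCID_MASK/SHIFT are placeholder
 * values; the real ones are ECORE_SWCID_MASK/ECORE_SWCID_SHIFT from the ecore
 * headers.
 */
#include <stdint.h>

#define EXAMPLE_SWCID_MASK	0x00ffffffu	/* placeholder: CID bits */
#define EXAMPLE_SWCID_SHIFT	24		/* placeholder: type position */

static uint32_t example_pack_echo(uint32_t cid, uint32_t type)
{
	return (cid & EXAMPLE_SWCID_MASK) | (type << EXAMPLE_SWCID_SHIFT);
}

static void example_unpack_echo(uint32_t echo, uint32_t *cid, uint32_t *type)
{
	*cid = echo & EXAMPLE_SWCID_MASK;
	*type = echo >> EXAMPLE_SWCID_SHIFT;
}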
792 /* hw_config() callbacks */
793 static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
794 struct ecore_vlan_mac_obj *o,
795 struct ecore_exeq_elem *elem, int rule_idx,
796 __rte_unused int cam_offset)
798 struct ecore_raw_obj *raw = &o->raw;
799 struct eth_classify_rules_ramrod_data *data =
800 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
801 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
802 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
803 int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
804 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
805 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
807 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
808 * relevant. In addition, current implementation is tuned for a
811 * When multiple unicast ETH MACs PF configuration in switch
812 * independent mode is required (NetQ, multiple netdev MACs,
813 * etc.), consider better utilisation of 8 per function MAC
814 * entries in the LLH register. There is also
815 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
816 * total number of CAM entries to 16.
818 * Currently we won't configure NIG for MACs other than a primary ETH
819 * MAC and iSCSI L2 MAC.
821 * If this MAC is moving from one Queue to another, no need to change
824 if (cmd != ECORE_VLAN_MAC_MOVE) {
825 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
826 ecore_set_mac_in_nig(sc, add, mac,
827 ECORE_LLH_CAM_ISCSI_ETH_LINE);
828 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
829 ecore_set_mac_in_nig(sc, add, mac,
830 ECORE_LLH_CAM_ETH_LINE);
833 /* Reset the ramrod data buffer for the first rule */
835 ECORE_MEMSET(data, 0, sizeof(*data));
837 /* Setup a command header */
838 ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
839 &rule_entry->mac.header);
841 ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
842 (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
843 mac[4], mac[5], raw->cl_id);
845 /* Set a MAC itself */
846 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
847 &rule_entry->mac.mac_mid,
848 &rule_entry->mac.mac_lsb, mac);
849 rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
851 /* MOVE: Add a rule that will add this MAC to the target Queue */
852 if (cmd == ECORE_VLAN_MAC_MOVE) {
856 /* Setup ramrod data */
857 ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
858 vlan_mac.target_obj, TRUE,
859 CLASSIFY_RULE_OPCODE_MAC,
860 &rule_entry->mac.header);
862 /* Set a MAC itself */
863 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
864 &rule_entry->mac.mac_mid,
865 &rule_entry->mac.mac_lsb, mac);
866 rule_entry->mac.inner_mac =
867 elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
870 /* Set the ramrod data header */
871 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
876 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
881 * @cam_offset: offset in cam memory
882 * @hdr: pointer to a header to setup
886 static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj
887 *o, int type, int cam_offset, struct mac_configuration_hdr
890 struct ecore_raw_obj *r = &o->raw;
893 hdr->offset = (uint8_t) cam_offset;
894 hdr->client_id = ECORE_CPU_TO_LE16(0xff);
895 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
896 (type << ECORE_SWCID_SHIFT));
899 static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj
900 *o, int add, int opcode,
902 uint16_t vlan_id, struct
903 mac_configuration_entry
906 struct ecore_raw_obj *r = &o->raw;
907 uint32_t cl_bit_vec = (1 << r->cl_id);
909 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
910 cfg_entry->pf_id = r->func_id;
911 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
914 ECORE_SET_FLAG(cfg_entry->flags,
915 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
916 T_ETH_MAC_COMMAND_SET);
917 ECORE_SET_FLAG(cfg_entry->flags,
918 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
921 /* Set a MAC in a ramrod data */
922 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
923 &cfg_entry->middle_mac_addr,
924 &cfg_entry->lsb_mac_addr, mac);
926 ECORE_SET_FLAG(cfg_entry->flags,
927 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
928 T_ETH_MAC_COMMAND_INVALIDATE);
931 static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
933 struct ecore_vlan_mac_obj *o,
934 int type, int cam_offset,
935 int add, uint8_t * mac,
936 uint16_t vlan_id, int opcode,
937 struct mac_configuration_cmd
940 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
942 ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
943 ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
946 ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
947 (add ? "setting" : "clearing"),
948 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
949 o->raw.cl_id, cam_offset);
953 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
956 * @o: ecore_vlan_mac_obj
957 * @elem: ecore_exeq_elem
958 * @rule_idx: rule_idx
959 * @cam_offset: cam_offset
961 static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
962 struct ecore_vlan_mac_obj *o,
963 struct ecore_exeq_elem *elem,
964 __rte_unused int rule_idx, int cam_offset)
966 struct ecore_raw_obj *raw = &o->raw;
967 struct mac_configuration_cmd *config =
968 (struct mac_configuration_cmd *)(raw->rdata);
969 /* 57711 does not support the MOVE command,
970 * so it's either ADD or DEL
972 int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
975 /* Reset the ramrod data buffer */
976 ECORE_MEMSET(config, 0, sizeof(*config));
978 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
980 elem->cmd_data.vlan_mac.u.mac.mac, 0,
981 ETH_VLAN_FILTER_ANY_VLAN, config);
985 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
988 * @p: command parameters
989 * @ppos: pointer to the cookie
991 * reconfigure next MAC/VLAN/VLAN-MAC element from the
992 * previously configured elements list.
994 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
997 * pointer to the cookie - that should be given back in the next call to make
998 * function handle the next element. If *ppos is set to NULL it will restart the
999 iterator. If returned *ppos == NULL this means that the last element has been handled.
1003 static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
1004 struct ecore_vlan_mac_ramrod_params *p,
1005 struct ecore_vlan_mac_registry_elem **ppos)
1007 struct ecore_vlan_mac_registry_elem *pos;
1008 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1010 /* If list is empty - there is nothing to do here */
1011 if (ECORE_LIST_IS_EMPTY(&o->head)) {
1016 /* make a step... */
1018 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct
1019 ecore_vlan_mac_registry_elem,
1022 *ppos = ECORE_LIST_NEXT(*ppos, link,
1023 struct ecore_vlan_mac_registry_elem);
1027 /* If it's the last step - return NULL */
1028 if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1031 /* Prepare a 'user_req' */
1032 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1034 /* Set the command */
1035 p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1037 /* Set vlan_mac_flags */
1038 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1040 /* Set a restore bit */
1041 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1043 return ecore_config_vlan_mac(sc, p);
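/* A minimal sketch (not driver code) of how a caller could drive the
 * cookie-based iterator above: start with a NULL cookie and keep calling
 * until the returned cookie is NULL again (last element handled) or a step
 * fails.  It assumes RAMROD_COMP_WAIT is already set in p->ramrod_flags so
 * each step completes before returning; a caller without that flag may need
 * to treat a pending status differently.  example_restore_all() is
 * hypothetical.
 */
static int example_restore_all(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_ramrod_params *p)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;	/* restart iterator */
	int rc;

	do {
		rc = ecore_vlan_mac_restore(sc, p, &pos);
		if (rc != ECORE_SUCCESS)
			return rc;	/* stop on anything but success */
	} while (pos);			/* NULL cookie => last element done */

	return ECORE_SUCCESS;
}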
1046 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1047 * pointer to an element with a specific criteria and NULL if such an element
1048 * hasn't been found.
1050 static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
1051 struct ecore_exeq_elem *elem)
1053 struct ecore_exeq_elem *pos;
1054 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1056 /* Check pending for execution commands */
1057 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1058 struct ecore_exeq_elem)
1059 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1061 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1068 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1070 * @sc: device handle
1071 * @qo: ecore_qable_obj
1072 * @elem: ecore_exeq_elem
1074 * Checks that the requested configuration can be added. If yes and if
1075 * requested, consume CAM credit.
1077 * The 'validate' is run after the 'optimize'.
1080 static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
1081 union ecore_qable_obj *qo,
1082 struct ecore_exeq_elem *elem)
1084 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1085 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1088 /* Check the registry */
1089 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1092 ("ADD command is not allowed considering current registry state.");
1096 /* Check if there is a pending ADD command for this
1097 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1099 if (exeq->get(exeq, elem)) {
1100 ECORE_MSG("There is a pending ADD command already");
1101 return ECORE_EXISTS;
1104 /* Consume the credit if not requested not to */
1105 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1106 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1110 return ECORE_SUCCESS;
1114 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1116 * @sc: device handle
1117 * @qo: quable object to check
1118 * @elem: element that needs to be deleted
1120 * Checks that the requested configuration can be deleted. If yes and if
1121 * requested, returns a CAM credit.
1123 * The 'validate' is run after the 'optimize'.
1125 static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
1126 union ecore_qable_obj *qo,
1127 struct ecore_exeq_elem *elem)
1129 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1130 struct ecore_vlan_mac_registry_elem *pos;
1131 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1132 struct ecore_exeq_elem query_elem;
1134 /* If this classification can not be deleted (doesn't exist)
1135 * - return ECORE_EXISTS.
1137 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1140 ("DEL command is not allowed considering current registry state");
1141 return ECORE_EXISTS;
1144 /* Check if there are pending DEL or MOVE commands for this
1145 * MAC/VLAN/VLAN-MAC. Return an error if so.
1147 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1149 /* Check for MOVE commands */
1150 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1151 if (exeq->get(exeq, &query_elem)) {
1152 PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
1156 /* Check for DEL commands */
1157 if (exeq->get(exeq, elem)) {
1158 ECORE_MSG("There is a pending DEL command already");
1159 return ECORE_EXISTS;
1162 /* Return the credit to the credit pool if not requested not to */
1163 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1164 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1165 o->put_credit(o))) {
1166 PMD_DRV_LOG(ERR, "Failed to return a credit");
1170 return ECORE_SUCCESS;
1174 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1176 * @sc: device handle
1177 * @qo: quable object to check (source)
1178 * @elem: element that needs to be moved
1180 * Checks that the requested configuration can be moved. If yes and if
1181 * requested, returns a CAM credit.
1183 * The 'validate' is run after the 'optimize'.
1185 static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
1186 union ecore_qable_obj *qo,
1187 struct ecore_exeq_elem *elem)
1189 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1190 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1191 struct ecore_exeq_elem query_elem;
1192 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1193 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1195 /* Check if we can perform this operation based on the current registry
1198 if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1200 ("MOVE command is not allowed considering current registry state");
1204 /* Check if there is an already pending DEL or MOVE command for the
1205 * source object or ADD command for a destination object. Return an
1208 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1210 /* Check DEL on source */
1211 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1212 if (src_exeq->get(src_exeq, &query_elem)) {
1214 "There is a pending DEL command on the source queue already");
1218 /* Check MOVE on source */
1219 if (src_exeq->get(src_exeq, elem)) {
1220 ECORE_MSG("There is a pending MOVE command already");
1221 return ECORE_EXISTS;
1224 /* Check ADD on destination */
1225 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1226 if (dest_exeq->get(dest_exeq, &query_elem)) {
1228 "There is a pending ADD command on the destination queue already");
1232 /* Consume the credit if not requested not to */
1233 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1234 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1235 dest_o->get_credit(dest_o)))
1238 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1239 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1240 src_o->put_credit(src_o))) {
1241 /* return the credit taken from dest... */
1242 dest_o->put_credit(dest_o);
1246 return ECORE_SUCCESS;
1249 static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
1250 union ecore_qable_obj *qo,
1251 struct ecore_exeq_elem *elem)
1253 switch (elem->cmd_data.vlan_mac.cmd) {
1254 case ECORE_VLAN_MAC_ADD:
1255 return ecore_validate_vlan_mac_add(sc, qo, elem);
1256 case ECORE_VLAN_MAC_DEL:
1257 return ecore_validate_vlan_mac_del(sc, qo, elem);
1258 case ECORE_VLAN_MAC_MOVE:
1259 return ecore_validate_vlan_mac_move(sc, qo, elem);
1265 static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
1266 union ecore_qable_obj *qo,
1267 struct ecore_exeq_elem *elem)
1271 /* If consumption wasn't required, nothing to do */
1272 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1273 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1274 return ECORE_SUCCESS;
1276 switch (elem->cmd_data.vlan_mac.cmd) {
1277 case ECORE_VLAN_MAC_ADD:
1278 case ECORE_VLAN_MAC_MOVE:
1279 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1281 case ECORE_VLAN_MAC_DEL:
1282 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1291 return ECORE_SUCCESS;
1295 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1297 * @sc: device handle
1298 * @o: ecore_vlan_mac_obj
1301 static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
1302 struct ecore_vlan_mac_obj *o)
1305 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1306 struct ecore_raw_obj *raw = &o->raw;
1309 /* Wait for the current command to complete */
1310 rc = raw->wait_comp(sc, raw);
1314 /* Wait until there are no pending commands */
1315 if (!ecore_exe_queue_empty(exeq))
1316 ECORE_WAIT(sc, 1000);
1318 return ECORE_SUCCESS;
1321 return ECORE_TIMEOUT;
1324 static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
1325 struct ecore_vlan_mac_obj *o,
1326 unsigned long *ramrod_flags)
1328 int rc = ECORE_SUCCESS;
1330 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1332 ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
1333 rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1335 if (rc != ECORE_SUCCESS) {
1336 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1338 /* Calling function should not differentiate between this case
1339 * and the case in which there is already a pending ramrod
1343 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1345 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1351 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1353 * @sc: device handle
1354 * @o: ecore_vlan_mac_obj
1356 * @cont: if TRUE schedule next execution chunk
1359 static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
1360 struct ecore_vlan_mac_obj *o,
1361 union event_ring_elem *cqe,
1362 unsigned long *ramrod_flags)
1364 struct ecore_raw_obj *r = &o->raw;
1367 /* Reset pending list */
1368 ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1371 r->clear_pending(r);
1373 /* If ramrod failed this is most likely a SW bug */
1374 if (cqe->message.error)
1377 /* Run the next bulk of pending commands if requested */
1378 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1379 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1384 /* If there is more work to do return PENDING */
1385 if (!ecore_exe_queue_empty(&o->exe_queue))
1386 return ECORE_PENDING;
1388 return ECORE_SUCCESS;
1392 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1394 * @sc: device handle
1395 * @o: ecore_qable_obj
1396 * @elem: ecore_exeq_elem
1398 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1399 union ecore_qable_obj *qo,
1400 struct ecore_exeq_elem *elem)
1402 struct ecore_exeq_elem query, *pos;
1403 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1404 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1406 ECORE_MEMCPY(&query, elem, sizeof(query));
1408 switch (elem->cmd_data.vlan_mac.cmd) {
1409 case ECORE_VLAN_MAC_ADD:
1410 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1412 case ECORE_VLAN_MAC_DEL:
1413 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1416 /* Don't handle anything other than ADD or DEL */
1420 /* If we found the appropriate element - delete it */
1421 pos = exeq->get(exeq, &query);
1424 /* Return the credit of the optimized command */
1425 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1426 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1427 if ((query.cmd_data.vlan_mac.cmd ==
1428 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1430 "Failed to return the credit for the optimized ADD command");
1432 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1434 "Failed to recover the credit from the optimized DEL command");
1439 ECORE_MSG("Optimizing %s command",
1440 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1443 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1444 ecore_exe_queue_free_elem(sc, pos);
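/* A minimal sketch (not driver code) of the optimization above: an incoming
 * ADD cancels a still-pending DEL for the same address (and vice versa), so
 * neither command has to reach the FW.  Shown over a plain array of pending
 * (is_add, key) pairs; example_* names are hypothetical.
 */
#include <stdbool.h>
#include <string.h>

struct example_pending_cmd {
	bool valid;
	int is_add;		/* 1 = ADD, 0 = DEL */
	unsigned char key[6];	/* e.g. a MAC address */
};

/* Returns true when the new command cancelled a pending opposite command,
 * i.e. the new command itself does not have to be queued at all.
 */
static bool example_optimize(struct example_pending_cmd *pending, int n,
			     int new_is_add, const unsigned char new_key[6])
{
	int i;

	for (i = 0; i < n; i++) {
		if (pending[i].valid &&
		    pending[i].is_add != new_is_add &&
		    memcmp(pending[i].key, new_key, 6) == 0) {
			pending[i].valid = false;	/* drop the old command */
			return true;			/* and swallow the new one */
		}
	}

	return false;
}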
1452 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1454 * @sc: device handle
1460 * prepare a registry element according to the current command request.
1462 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1463 struct ecore_vlan_mac_obj *o,
1464 struct ecore_exeq_elem *elem,
1466 ecore_vlan_mac_registry_elem
1469 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1470 struct ecore_vlan_mac_registry_elem *reg_elem;
1472 /* Allocate a new registry element if needed. */
1474 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1475 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1479 /* Get a new CAM offset */
1480 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1481 /* This shall never happen, because we have checked the
1482 * CAM availability in the 'validate'.
1484 ECORE_DBG_BREAK_IF(1);
1485 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1489 ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
1491 /* Set a VLAN-MAC data */
1492 ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u,
1493 sizeof(reg_elem->u));
1495 /* Copy the flags (needed for DEL and RESTORE flows) */
1496 reg_elem->vlan_mac_flags =
1497 elem->cmd_data.vlan_mac.vlan_mac_flags;
1498 } else /* DEL, RESTORE */
1499 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1502 return ECORE_SUCCESS;
1506 * ecore_execute_vlan_mac - execute vlan mac command
1508 * @sc: device handle
1513 * go and send a ramrod!
1515 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1516 union ecore_qable_obj *qo,
1517 ecore_list_t * exe_chunk,
1518 unsigned long *ramrod_flags)
1520 struct ecore_exeq_elem *elem;
1521 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1522 struct ecore_raw_obj *r = &o->raw;
1524 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1525 int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1526 struct ecore_vlan_mac_registry_elem *reg_elem;
1527 enum ecore_vlan_mac_cmd cmd;
1529 /* If DRIVER_ONLY execution is requested, cleanup a registry
1530 * and exit. Otherwise send a ramrod to FW.
1537 /* Fill the ramrod data */
1538 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1539 struct ecore_exeq_elem) {
1540 cmd = elem->cmd_data.vlan_mac.cmd;
1541 /* We will add to the target object in MOVE command, so
1542 * change the object for a CAM search.
1544 if (cmd == ECORE_VLAN_MAC_MOVE)
1545 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1549 rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1555 ECORE_DBG_BREAK_IF(!reg_elem);
1557 /* Push a new entry into the registry */
1559 ((cmd == ECORE_VLAN_MAC_ADD) ||
1560 (cmd == ECORE_VLAN_MAC_MOVE)))
1561 ECORE_LIST_PUSH_HEAD(®_elem->link,
1564 /* Configure a single command in a ramrod data buffer */
1565 o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1567 /* MOVE command consumes 2 entries in the ramrod data */
1568 if (cmd == ECORE_VLAN_MAC_MOVE)
1575 * No need for an explicit memory barrier here as long as we ensure
1576 * the ordering of writing to the SPQ element and updating of the
1577 * SPQ producer, which involves a memory read; the full memory
1578 * barrier needed for that is placed there
1579 * (inside ecore_sp_post()).
1582 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1583 r->rdata_mapping, ETH_CONNECTION_TYPE);
1588 /* Now, when we are done with the ramrod - clean up the registry */
1589 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1590 cmd = elem->cmd_data.vlan_mac.cmd;
1591 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1592 reg_elem = o->check_del(sc, o,
1593 &elem->cmd_data.vlan_mac.u);
1595 ECORE_DBG_BREAK_IF(!reg_elem);
1597 o->put_cam_offset(o, reg_elem->cam_offset);
1598 ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head);
1599 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1604 return ECORE_PENDING;
1606 return ECORE_SUCCESS;
1609 r->clear_pending(r);
1611 /* Cleanup a registry in case of a failure */
1612 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1613 cmd = elem->cmd_data.vlan_mac.cmd;
1615 if (cmd == ECORE_VLAN_MAC_MOVE)
1616 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1620 /* Delete all newly added above entries */
1622 ((cmd == ECORE_VLAN_MAC_ADD) ||
1623 (cmd == ECORE_VLAN_MAC_MOVE))) {
1624 reg_elem = o->check_del(sc, cam_obj,
1625 &elem->cmd_data.vlan_mac.u);
1627 ECORE_LIST_REMOVE_ENTRY(®_elem->link,
1629 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1637 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1638 ecore_vlan_mac_ramrod_params *p)
1640 struct ecore_exeq_elem *elem;
1641 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1642 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1644 /* Allocate the execution queue element */
1645 elem = ecore_exe_queue_alloc_elem(sc);
1649 /* Set the command 'length' */
1650 switch (p->user_req.cmd) {
1651 case ECORE_VLAN_MAC_MOVE:
1658 /* Fill the object specific info */
1659 ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1660 sizeof(p->user_req));
1662 /* Try to add a new command to the pending list */
1663 return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1667 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1669 * @sc: device handle
1673 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1674 struct ecore_vlan_mac_ramrod_params *p)
1676 int rc = ECORE_SUCCESS;
1677 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1678 unsigned long *ramrod_flags = &p->ramrod_flags;
1679 int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1680 struct ecore_raw_obj *raw = &o->raw;
1683 * Add new elements to the execution list for commands that require it.
1686 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1691 /* If nothing further will be executed in this iteration we want to
1692 * return PENDING if there are pending commands
1694 if (!ecore_exe_queue_empty(&o->exe_queue))
1697 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1699 ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1700 raw->clear_pending(raw);
1703 /* Execute commands if required */
1704 if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1705 ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1706 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1712 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1713 * then the user wants to wait until the last command is done.
1715 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1716 /* Wait at most for the current exe_queue length iterations plus
1717 * one (for the current pending command).
1719 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1721 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1724 /* Wait for the current command to complete */
1725 rc = raw->wait_comp(sc, raw);
1729 /* Make a next step */
1730 rc = __ecore_vlan_mac_execute_step(sc,
1737 return ECORE_SUCCESS;
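/* A minimal sketch (not driver code) of a typical synchronous ADD request
 * driven through ecore_config_vlan_mac().  The field names follow what the
 * code above already uses (vlan_mac_obj, ramrod_flags, user_req); that the
 * classification union is addressed via .mac.mac for a MAC object is an
 * assumption based on the accessors above.  example_add_one_mac() is
 * hypothetical.
 */
static int example_add_one_mac(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o,
			       const uint8_t mac[ETH_ALEN])
{
	struct ecore_vlan_mac_ramrod_params p;

	ECORE_MEMSET(&p, 0, sizeof(p));

	p.vlan_mac_obj = o;
	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
	ECORE_MEMCPY(p.user_req.u.mac.mac, mac, ETH_ALEN);

	/* Execute right away and wait for the ramrod to complete */
	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return ecore_config_vlan_mac(sc, &p);
}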
1744 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1746 * @sc: device handle
1749 * @ramrod_flags: execution flags to be used for this deletion
1751 * if the last operation has completed successfully and there are no
1752 * more elements left, positive value if the last operation has completed
1753 * successfully and there are more previously configured elements, negative
1754 * value if the current operation has failed.
1756 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1757 struct ecore_vlan_mac_obj *o,
1758 unsigned long *vlan_mac_flags,
1759 unsigned long *ramrod_flags)
1761 struct ecore_vlan_mac_registry_elem *pos = NULL;
1762 int rc = 0, read_lock;
1763 struct ecore_vlan_mac_ramrod_params p;
1764 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1765 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1767 /* Clear pending commands first */
1769 ECORE_SPIN_LOCK_BH(&exeq->lock);
1771 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1772 &exeq->exe_queue, link,
1773 struct ecore_exeq_elem) {
1774 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1776 rc = exeq->remove(sc, exeq->owner, exeq_pos);
1778 PMD_DRV_LOG(ERR, "Failed to remove command");
1779 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1782 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1784 ecore_exe_queue_free_elem(sc, exeq_pos);
1788 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1790 /* Prepare a command request */
1791 ECORE_MEMSET(&p, 0, sizeof(p));
1793 p.ramrod_flags = *ramrod_flags;
1794 p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1796 /* Add all but the last VLAN-MAC to the execution queue without actually
1797 * executing anything.
1799 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1800 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1801 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1803 ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1804 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1805 if (read_lock != ECORE_SUCCESS)
1808 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1809 struct ecore_vlan_mac_registry_elem) {
1810 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1811 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1812 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1813 rc = ecore_config_vlan_mac(sc, &p);
1816 "Failed to add a new DEL command");
1817 ecore_vlan_mac_h_read_unlock(sc, o);
1823 ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1824 ecore_vlan_mac_h_read_unlock(sc, o);
1826 p.ramrod_flags = *ramrod_flags;
1827 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1829 return ecore_config_vlan_mac(sc, &p);
1832 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1833 uint32_t cid, uint8_t func_id,
1835 ecore_dma_addr_t rdata_mapping, int state,
1836 unsigned long *pstate, ecore_obj_type type)
1838 raw->func_id = func_id;
1842 raw->rdata_mapping = rdata_mapping;
1844 raw->pstate = pstate;
1845 raw->obj_type = type;
1846 raw->check_pending = ecore_raw_check_pending;
1847 raw->clear_pending = ecore_raw_clear_pending;
1848 raw->set_pending = ecore_raw_set_pending;
1849 raw->wait_comp = ecore_raw_wait;
1852 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1853 uint8_t cl_id, uint32_t cid,
1854 uint8_t func_id, void *rdata,
1855 ecore_dma_addr_t rdata_mapping,
1856 int state, unsigned long *pstate,
1857 ecore_obj_type type,
1858 struct ecore_credit_pool_obj
1859 *macs_pool, struct ecore_credit_pool_obj
1862 ECORE_LIST_INIT(&o->head);
1864 o->head_exe_request = FALSE;
1865 o->saved_ramrod_flags = 0;
1867 o->macs_pool = macs_pool;
1868 o->vlans_pool = vlans_pool;
1870 o->delete_all = ecore_vlan_mac_del_all;
1871 o->restore = ecore_vlan_mac_restore;
1872 o->complete = ecore_complete_vlan_mac;
1873 o->wait = ecore_wait_vlan_mac;
1875 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1876 state, pstate, type);
1879 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1880 struct ecore_vlan_mac_obj *mac_obj,
1881 uint8_t cl_id, uint32_t cid, uint8_t func_id,
1882 void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1883 unsigned long *pstate, ecore_obj_type type,
1884 struct ecore_credit_pool_obj *macs_pool)
1886 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1888 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1889 rdata_mapping, state, pstate, type,
1892 /* CAM credit pool handling */
1893 mac_obj->get_credit = ecore_get_credit_mac;
1894 mac_obj->put_credit = ecore_put_credit_mac;
1895 mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1896 mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1898 if (CHIP_IS_E1x(sc)) {
1899 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1900 mac_obj->check_del = ecore_check_mac_del;
1901 mac_obj->check_add = ecore_check_mac_add;
1902 mac_obj->check_move = ecore_check_move_always_err;
1903 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1906 ecore_exe_queue_init(sc,
1907 &mac_obj->exe_queue, 1, qable_obj,
1908 ecore_validate_vlan_mac,
1909 ecore_remove_vlan_mac,
1910 ecore_optimize_vlan_mac,
1911 ecore_execute_vlan_mac,
1912 ecore_exeq_get_mac);
1914 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1915 mac_obj->check_del = ecore_check_mac_del;
1916 mac_obj->check_add = ecore_check_mac_add;
1917 mac_obj->check_move = ecore_check_move;
1918 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1919 mac_obj->get_n_elements = ecore_get_n_elements;
1922 ecore_exe_queue_init(sc,
1923 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1924 qable_obj, ecore_validate_vlan_mac,
1925 ecore_remove_vlan_mac,
1926 ecore_optimize_vlan_mac,
1927 ecore_execute_vlan_mac,
1928 ecore_exeq_get_mac);
1932 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1933 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1934 tstorm_eth_mac_filter_config
1935 *mac_filters, uint16_t pf_id)
1937 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1939 uint32_t addr = BAR_TSTRORM_INTMEM +
1940 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1942 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1945 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1946 struct ecore_rx_mode_ramrod_params *p)
1948 /* update the sc MAC filter structure */
1949 uint32_t mask = (1 << p->cl_id);
1951 struct tstorm_eth_mac_filter_config *mac_filters =
1952 (struct tstorm_eth_mac_filter_config *)p->rdata;
1954 /* initial setting is drop-all */
1955 uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1956 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1957 uint8_t unmatched_unicast = 0;
1959 /* In e1x we only take into account the rx accept flag since tx switching isn't enabled. */
1961 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1962 /* accept matched ucast */
1965 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1966 /* accept matched mcast */
1969 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1970 /* accept all ucast */
1974 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1975 /* accept all mcast */
1979 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1980 /* accept (all) bcast */
1982 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1983 /* accept unmatched unicasts */
1984 unmatched_unicast = 1;
1986 mac_filters->ucast_drop_all = drop_all_ucast ?
1987 mac_filters->ucast_drop_all | mask :
1988 mac_filters->ucast_drop_all & ~mask;
1990 mac_filters->mcast_drop_all = drop_all_mcast ?
1991 mac_filters->mcast_drop_all | mask :
1992 mac_filters->mcast_drop_all & ~mask;
1994 mac_filters->ucast_accept_all = accp_all_ucast ?
1995 mac_filters->ucast_accept_all | mask :
1996 mac_filters->ucast_accept_all & ~mask;
1998 mac_filters->mcast_accept_all = accp_all_mcast ?
1999 mac_filters->mcast_accept_all | mask :
2000 mac_filters->mcast_accept_all & ~mask;
2002 mac_filters->bcast_accept_all = accp_all_bcast ?
2003 mac_filters->bcast_accept_all | mask :
2004 mac_filters->bcast_accept_all & ~mask;
2006 mac_filters->unmatched_unicast = unmatched_unicast ?
2007 mac_filters->unmatched_unicast | mask :
2008 mac_filters->unmatched_unicast & ~mask;
2010 ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
2011 "accp_mcast 0x%xaccp_bcast 0x%x",
2012 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2013 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2014 mac_filters->bcast_accept_all);
2016 /* write the MAC filter structure */
2017 __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2019 /* The operation is completed */
2020 ECORE_CLEAR_BIT(p->state, p->pstate);
2021 ECORE_SMP_MB_AFTER_CLEAR_BIT();
2023 return ECORE_SUCCESS;
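/* A minimal sketch (not driver code) of the per-client bit update pattern
 * repeated above: each filter word keeps one bit per client (mask = 1 << cl_id)
 * that is set or cleared depending on the requested rx-mode flag.
 * example_update_client_bit() is hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t example_update_client_bit(uint32_t word, uint32_t mask,
					  bool enable)
{
	return enable ? (word | mask) : (word & ~mask);
}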
2026 /* Setup ramrod data */
2027 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2028 *hdr, uint8_t rule_cnt)
2030 hdr->echo = ECORE_CPU_TO_LE32(cid);
2031 hdr->rule_cnt = rule_cnt;
2034 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
2035 *cmd, int clear_accept_all)
2039 /* start with 'drop-all' */
2040 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2041 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2043 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2044 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2046 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2047 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2049 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2050 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2051 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2054 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2055 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2056 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2058 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2059 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2061 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2062 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2063 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2065 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2066 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2068 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2069 if (clear_accept_all) {
2070 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2071 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2072 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2073 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2076 cmd->state = ECORE_CPU_TO_LE16(state);
2079 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2080 struct ecore_rx_mode_ramrod_params *p)
2082 struct eth_filter_rules_ramrod_data *data = p->rdata;
2084 uint8_t rule_idx = 0;
2086 /* Reset the ramrod data buffer */
2087 ECORE_MEMSET(data, 0, sizeof(*data));
2089 /* Setup ramrod data */
2091 /* Tx (internal switching) */
2092 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2093 data->rules[rule_idx].client_id = p->cl_id;
2094 data->rules[rule_idx].func_id = p->func_id;
2096 data->rules[rule_idx].cmd_general_data =
2097 ETH_FILTER_RULES_CMD_TX_CMD;
2099 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2100 &(data->rules[rule_idx++]),
2105 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2106 data->rules[rule_idx].client_id = p->cl_id;
2107 data->rules[rule_idx].func_id = p->func_id;
2109 data->rules[rule_idx].cmd_general_data =
2110 ETH_FILTER_RULES_CMD_RX_CMD;
2112 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2113 &(data->rules[rule_idx++]),
2117 /* If FCoE Queue configuration has been requested configure the Rx and
2118 * internal switching modes for this queue in separate rules.
2120 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2121 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2123 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2124 /* Tx (internal switching) */
2125 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2126 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2127 data->rules[rule_idx].func_id = p->func_id;
2129 data->rules[rule_idx].cmd_general_data =
2130 ETH_FILTER_RULES_CMD_TX_CMD;
2132 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2134 [rule_idx++]), TRUE);
2138 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2139 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2140 data->rules[rule_idx].func_id = p->func_id;
2142 data->rules[rule_idx].cmd_general_data =
2143 ETH_FILTER_RULES_CMD_RX_CMD;
2145 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2147 [rule_idx++]), TRUE);
2151 /* Set the ramrod header (most importantly - the number of rules to configure). */
2154 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2157 ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2158 data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2160 /* No need for an explicit memory barrier here as long as we ensure
2161 * the ordering of writing to the SPQ element and updating of the
2162 * SPQ producer, which involves a memory read; the full memory
2163 * barrier needed for that is placed there
2164 * (inside ecore_sp_post()).
2168 rc = ecore_sp_post(sc,
2169 RAMROD_CMD_ID_ETH_FILTER_RULES,
2170 p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2174 /* Ramrod completion is pending */
2175 return ECORE_PENDING;
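/* Illustrative usage sketch, not part of the driver itself: a caller would
 * typically fill an ecore_rx_mode_ramrod_params with the client/function ids,
 * the rdata buffer and its DMA mapping, the accept flags and RAMROD_RX/TX,
 * and then call ecore_config_rx_mode(); with RAMROD_COMP_WAIT the call waits
 * for the filter-rules ramrod to complete.  Only field names already used in
 * this file are referenced; the softc member name is an assumption:
 *
 *	struct ecore_rx_mode_ramrod_params p = { 0 };
 *
 *	p.rx_mode_obj = &sc->rx_mode_obj;	(assumed softc member)
 *	p.cl_id = cl_id;
 *	p.cid = cid;
 *	p.func_id = func_id;
 *	p.rdata = rdata;
 *	p.rdata_mapping = rdata_mapping;
 *	ECORE_SET_BIT(RAMROD_RX, &p.ramrod_flags);
 *	ECORE_SET_BIT(RAMROD_TX, &p.ramrod_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *	rc = ecore_config_rx_mode(sc, &p);
 */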
2178 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2179 struct ecore_rx_mode_ramrod_params *p)
2181 return ecore_state_wait(sc, p->state, p->pstate);
2184 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2185 __rte_unused struct
2186 ecore_rx_mode_ramrod_params *p)
2189 return ECORE_SUCCESS;
2192 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2193 struct ecore_rx_mode_ramrod_params *p)
2197 /* Configure the new classification in the chip */
2198 if (p->rx_mode_obj->config_rx_mode) {
2199 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2203 /* Wait for a ramrod completion if it was requested */
2204 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2205 rc = p->rx_mode_obj->wait_comp(sc, p);
2209 } else {
2210 ECORE_MSG("ERROR: config_rx_mode is NULL");
2214 return rc;
2217 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2219 if (CHIP_IS_E1x(sc)) {
2220 o->wait_comp = ecore_empty_rx_mode_wait;
2221 o->config_rx_mode = ecore_set_rx_mode_e1x;
2222 } else {
2223 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2224 o->config_rx_mode = ecore_set_rx_mode_e2;
2228 /********************* Multicast verbs: SET, CLEAR ****************************/
2229 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2231 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
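/* Illustrative note, not part of the driver itself: multicast MACs are not
 * matched exactly on 57711 and newer; each MAC is hashed into one of 256
 * "bins" and the firmware accepts any multicast frame whose hash hits a
 * configured bin.  The bin is simply bits 31..24 of the little-endian CRC32
 * of the 6-byte MAC, e.g.:
 *
 *	uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x11, 0x22, 0x33 };
 *	uint32_t crc = ECORE_CRC32_LE(0, mac, ETH_ALEN);
 *	int bin = (crc >> 24) & 0xff;	(a value in 0..255)
 */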
2234 struct ecore_mcast_mac_elem {
2235 ecore_list_entry_t link;
2236 uint8_t mac[ETH_ALEN];
2237 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2240 struct ecore_pending_mcast_cmd {
2241 ecore_list_entry_t link;
2242 int type; /* ECORE_MCAST_CMD_X */
2244 ecore_list_t macs_head;
2245 uint32_t macs_num; /* Needed for DEL command */
2246 int next_bin; /* Needed for RESTORE flow with aprox match */
2249 int done; /* Set to TRUE when the command has been handled. It is
2250 * practically used in 57712 handling only, where one pending
2251 * command may be handled in a few operations. Since for
2252 * other chips every operation is completed in a
2253 * single ramrod, there is no need to use this field.
2257 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2259 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2260 o->raw.wait_comp(sc, &o->raw))
2261 return ECORE_TIMEOUT;
2263 return ECORE_SUCCESS;
2266 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2267 struct ecore_mcast_obj *o,
2268 struct ecore_mcast_ramrod_params *p,
2269 enum ecore_mcast_cmd cmd)
2272 struct ecore_pending_mcast_cmd *new_cmd;
2273 struct ecore_mcast_mac_elem *cur_mac = NULL;
2274 struct ecore_mcast_list_elem *pos;
2275 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2276 p->mcast_list_len : 0);
2278 /* If the command is empty ("handle pending commands only"), break */
2279 if (!p->mcast_list_len)
2280 return ECORE_SUCCESS;
2282 total_sz = sizeof(*new_cmd) +
2283 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2285 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2286 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2288 if (!new_cmd)
2289 return ECORE_NOMEM;
2291 ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
2292 cmd, macs_list_len);
2294 ECORE_LIST_INIT(&new_cmd->data.macs_head);
2296 new_cmd->type = cmd;
2297 new_cmd->done = FALSE;
2300 case ECORE_MCAST_CMD_ADD:
2301 cur_mac = (struct ecore_mcast_mac_elem *)
2302 ((uint8_t *) new_cmd + sizeof(*new_cmd));
2304 /* Push the MACs of the current command into the pending command's MACs list: FIFO */
2307 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2308 struct ecore_mcast_list_elem) {
2309 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2310 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2311 &new_cmd->data.macs_head);
2317 case ECORE_MCAST_CMD_DEL:
2318 new_cmd->data.macs_num = p->mcast_list_len;
2321 case ECORE_MCAST_CMD_RESTORE:
2322 new_cmd->data.next_bin = 0;
2326 ECORE_FREE(sc, new_cmd, total_sz);
2327 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2331 /* Push the new pending command to the tail of the pending list: FIFO */
2332 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2336 return ECORE_PENDING;
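/* Illustrative note, not part of the driver itself: the pending command and
 * its MAC array are carved out of a single allocation - the
 * struct ecore_pending_mcast_cmd header comes first and the
 * ecore_mcast_mac_elem entries follow it in the same buffer, which is why
 * cur_mac starts at ((uint8_t *)new_cmd + sizeof(*new_cmd)) and is then
 * advanced element by element while the MACs are copied in.  Rough layout
 * for an ADD of N MACs:
 *
 *	+---------------------------------+  <- new_cmd (one ECORE_ZALLOC of total_sz)
 *	| struct ecore_pending_mcast_cmd  |
 *	+---------------------------------+  <- cur_mac
 *	| ecore_mcast_mac_elem[0]         |
 *	| ...                             |
 *	| ecore_mcast_mac_elem[N-1]       |
 *	+---------------------------------+
 */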
2340 * ecore_mcast_get_next_bin - get the next set bin (index)
2343 * @last: index to start looking from (including)
2345 * returns the next found (set) bin or a negative value if none is found.
2347 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2349 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2351 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2352 if (o->registry.aprox_match.vec[i])
2353 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2354 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2355 if (BIT_VEC64_TEST_BIT
2356 (o->registry.aprox_match.vec, cur_bit))
2357 return cur_bit;
2359 inner_start = 0;
2362 /* None found */
2363 return -1;
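/* Illustrative note, not part of the driver itself: the approximate-match
 * registry is a flat bit vector of 256 bins stored as ECORE_MCAST_VEC_SZ
 * 64-bit words, so bin b lives in word b / 64 at bit position b % 64.
 * Assuming the usual definitions of the BIT_VEC64_* helpers, the test above
 * is equivalent to:
 *
 *	uint64_t *vec = o->registry.aprox_match.vec;
 *	int set = (vec[b >> 6] >> (b & 0x3f)) & 1;	(b in 0..255)
 */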
2368 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2372 * returns the index of the found bin or -1 if none is found
2374 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2376 int cur_bit = ecore_mcast_get_next_bin(o, 0);
2378 if (cur_bit >= 0)
2379 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2381 return cur_bit;
2384 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2386 struct ecore_raw_obj *raw = &o->raw;
2387 uint8_t rx_tx_flag = 0;
2389 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2390 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2391 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2393 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2394 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2395 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2397 return rx_tx_flag;
2400 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2401 struct ecore_mcast_obj *o, int idx,
2402 union ecore_mcast_config_data *cfg_data,
2403 enum ecore_mcast_cmd cmd)
2405 struct ecore_raw_obj *r = &o->raw;
2406 struct eth_multicast_rules_ramrod_data *data =
2407 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2408 uint8_t func_id = r->func_id;
2409 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2412 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2413 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2415 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2417 /* Get a bin and update a bins' vector */
2419 case ECORE_MCAST_CMD_ADD:
2420 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2421 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2424 case ECORE_MCAST_CMD_DEL:
2425 /* If there were no more bins to clear
2426 * (ecore_mcast_clear_first_bin() returns -1) then we would
2427 * clear any (0xff) bin.
2428 * See ecore_mcast_validate_e2() for an explanation of when this may happen. */
2431 bin = ecore_mcast_clear_first_bin(o);
2434 case ECORE_MCAST_CMD_RESTORE:
2435 bin = cfg_data->bin;
2439 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2443 ECORE_MSG("%s bin %d",
2444 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2445 "Setting" : "Clearing"), bin);
2447 data->rules[idx].bin_id = (uint8_t) bin;
2448 data->rules[idx].func_id = func_id;
2449 data->rules[idx].engine_id = o->engine_id;
2453 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2455 * @sc: device handle
2457 * @start_bin: index in the registry to start from (including)
2458 * @rdata_idx: index in the ramrod data to start from
2460 * returns last handled bin index or -1 if all bins have been handled
2462 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2463 struct ecore_mcast_obj *o,
2464 int start_bin, int *rdata_idx)
2466 int cur_bin, cnt = *rdata_idx;
2467 union ecore_mcast_config_data cfg_data = { NULL };
2469 /* go through the registry and configure the bins from it */
2470 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2471 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2473 cfg_data.bin = (uint8_t) cur_bin;
2474 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2478 ECORE_MSG("About to configure a bin %d", cur_bin);
2480 /* Break if we reached the maximum number of rules. */
2483 if (cnt >= o->max_cmd_len)
2484 break;
2487 *rdata_idx = cnt;
2489 return cur_bin;
2492 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2493 struct ecore_mcast_obj *o,
2494 struct ecore_pending_mcast_cmd
2495 *cmd_pos, int *line_idx)
2497 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2498 int cnt = *line_idx;
2499 union ecore_mcast_config_data cfg_data = { NULL };
2501 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2502 &cmd_pos->data.macs_head, link,
2503 struct ecore_mcast_mac_elem) {
2505 cfg_data.mac = &pmac_pos->mac[0];
2506 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2510 ECORE_MSG
2511 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2512 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2513 pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2515 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2516 &cmd_pos->data.macs_head);
2518 /* Break if we reached the maximum number of rules. */
2521 if (cnt >= o->max_cmd_len)
2522 break;
2527 /* if no more MACs to configure - we are done */
2528 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2529 cmd_pos->done = TRUE;
2532 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2533 struct ecore_mcast_obj *o,
2534 struct ecore_pending_mcast_cmd
2535 *cmd_pos, int *line_idx)
2537 int cnt = *line_idx;
2539 while (cmd_pos->data.macs_num) {
2540 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2544 cmd_pos->data.macs_num--;
2546 ECORE_MSG("Deleting MAC. %d left,cnt is %d",
2547 cmd_pos->data.macs_num, cnt);
2549 /* Break if we reached the maximum number of rules. */
2552 if (cnt >= o->max_cmd_len)
2553 break;
2558 /* If we cleared all bins - we are done */
2559 if (!cmd_pos->data.macs_num)
2560 cmd_pos->done = TRUE;
2563 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2564 struct ecore_mcast_obj *o, struct
2565 ecore_pending_mcast_cmd
2566 *cmd_pos, int *line_idx)
2568 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2569 line_idx);
2571 if (cmd_pos->data.next_bin < 0)
2572 /* If o->set_restore returned -1 we are done */
2573 cmd_pos->done = TRUE;
2575 /* Start from the next bin next time */
2576 cmd_pos->data.next_bin++;
2579 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2580 ecore_mcast_ramrod_params
2581 *p)
2583 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2585 struct ecore_mcast_obj *o = p->mcast_obj;
2587 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2588 &o->pending_cmds_head, link,
2589 struct ecore_pending_mcast_cmd) {
2590 switch (cmd_pos->type) {
2591 case ECORE_MCAST_CMD_ADD:
2592 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2595 case ECORE_MCAST_CMD_DEL:
2596 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2599 case ECORE_MCAST_CMD_RESTORE:
2600 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2605 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
2609 /* If the command has been completed - remove it from the list
2610 * and free the memory
2612 if (cmd_pos->done) {
2613 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2614 &o->pending_cmds_head);
2615 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2618 /* Break if we reached the maximum number of rules */
2619 if (cnt >= o->max_cmd_len)
2620 break;
2623 return cnt;
2626 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2627 struct ecore_mcast_obj *o,
2628 struct ecore_mcast_ramrod_params *p,
2631 struct ecore_mcast_list_elem *mlist_pos;
2632 union ecore_mcast_config_data cfg_data = { NULL };
2633 int cnt = *line_idx;
2635 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2636 struct ecore_mcast_list_elem) {
2637 cfg_data.mac = mlist_pos->mac;
2638 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2642 ECORE_MSG
2643 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2644 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2645 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2651 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2652 struct ecore_mcast_obj *o,
2653 struct ecore_mcast_ramrod_params *p,
2656 int cnt = *line_idx, i;
2658 for (i = 0; i < p->mcast_list_len; i++) {
2659 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2663 ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
2670 * ecore_mcast_handle_current_cmd - handle the current (not yet pending) command
2672 * @sc: device handle
2675 * @start_cnt: first line in the ramrod data that may be used
2677 * This function is called if there is enough room for the current command in
2678 * the ramrod data.
2679 * Returns number of lines filled in the ramrod data in total.
2681 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2682 ecore_mcast_ramrod_params *p,
2683 enum ecore_mcast_cmd cmd,
2684 int start_cnt)
2686 struct ecore_mcast_obj *o = p->mcast_obj;
2687 int cnt = start_cnt;
2689 ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
2692 case ECORE_MCAST_CMD_ADD:
2693 ecore_mcast_hdl_add(sc, o, p, &cnt);
2696 case ECORE_MCAST_CMD_DEL:
2697 ecore_mcast_hdl_del(sc, o, p, &cnt);
2700 case ECORE_MCAST_CMD_RESTORE:
2701 o->hdl_restore(sc, o, 0, &cnt);
2705 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2709 /* The current command has been handled */
2710 p->mcast_list_len = 0;
2715 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2716 struct ecore_mcast_ramrod_params *p,
2717 enum ecore_mcast_cmd cmd)
2719 struct ecore_mcast_obj *o = p->mcast_obj;
2720 int reg_sz = o->get_registry_size(o);
2723 /* DEL command deletes all currently configured MACs */
2724 case ECORE_MCAST_CMD_DEL:
2725 o->set_registry_size(o, 0);
2728 /* RESTORE command will restore the entire multicast configuration */
2729 case ECORE_MCAST_CMD_RESTORE:
2730 /* Here we set the approximate amount of work to do, which in
2731 * fact may be less, as some MACs in postponed ADD
2732 * command(s) scheduled before this command may fall into
2733 * the same bin and the actual number of bins set in the
2734 * registry would be less than we estimated here. See
2735 * ecore_mcast_set_one_rule_e2() for further details.
2737 p->mcast_list_len = reg_sz;
2740 case ECORE_MCAST_CMD_ADD:
2741 case ECORE_MCAST_CMD_CONT:
2742 /* Here we assume that all new MACs will fall into new bins.
2743 * However we will correct the real registry size after we
2744 * handle all pending commands.
2746 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2750 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2754 /* Increase the total number of MACs pending to be configured */
2755 o->total_pending_num += p->mcast_list_len;
2757 return ECORE_SUCCESS;
2760 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2761 struct ecore_mcast_ramrod_params *p,
2762 int old_num_bins)
2764 struct ecore_mcast_obj *o = p->mcast_obj;
2766 o->set_registry_size(o, old_num_bins);
2767 o->total_pending_num -= p->mcast_list_len;
2771 * ecore_mcast_set_rdata_hdr_e2 - sets a header values
2773 * @sc: device handle
2775 * @len: number of rules to handle
2777 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
2778 *sc, struct ecore_mcast_ramrod_params
2779 *p, uint8_t len)
2781 struct ecore_raw_obj *r = &p->mcast_obj->raw;
2782 struct eth_multicast_rules_ramrod_data *data =
2783 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2785 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2786 (ECORE_FILTER_MCAST_PENDING <<
2787 ECORE_SWCID_SHIFT));
2788 data->header.rule_cnt = len;
2792 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2794 * @sc: device handle
2797 * Recalculate the actual number of set bins in the registry using Brian
2798 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2800 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2802 int i, cnt = 0;
2803 uint64_t elem;
2805 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2806 elem = o->registry.aprox_match.vec[i];
2807 for (; elem; cnt++)
2808 elem &= elem - 1;
2811 o->set_registry_size(o, cnt);
2813 return ECORE_SUCCESS;
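/* Illustrative note, not part of the driver itself: the inner loop above is
 * Kernighan's population-count trick - clearing the lowest set bit with
 * `elem &= elem - 1` once per iteration means the loop runs exactly as many
 * times as there are set bins in that word.  A standalone sketch of the same
 * idea:
 *
 *	static int popcount64_kernighan(uint64_t x)
 *	{
 *		int n = 0;
 *
 *		while (x) {
 *			x &= x - 1;	(drop the lowest set bit)
 *			n++;
 *		}
 *		return n;
 *	}
 */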
2816 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2817 struct ecore_mcast_ramrod_params *p,
2818 enum ecore_mcast_cmd cmd)
2820 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2821 struct ecore_mcast_obj *o = p->mcast_obj;
2822 struct eth_multicast_rules_ramrod_data *data =
2823 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2826 /* Reset the ramrod data buffer */
2827 ECORE_MEMSET(data, 0, sizeof(*data));
2829 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2831 /* If there are no more pending commands - clear SCHEDULED state */
2832 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2833 o->clear_sched(o);
2835 /* The below may be TRUE if there was enough room in ramrod
2836 * data for all pending commands and for the current
2837 * command. Otherwise the current command would have been added
2838 * to the pending commands and p->mcast_list_len would have been zeroed. */
2841 if (p->mcast_list_len > 0)
2842 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2844 /* We've pulled out some MACs - update the total number of outstanding MACs. */
2847 o->total_pending_num -= cnt;
2850 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2851 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2853 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2855 /* Update a registry size if there are no more pending operations.
2857 * We don't want to change the value of the registry size if there are
2858 * pending operations because we want it to always be equal to the
2859 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2860 * set bins after the last requested operation in order to properly
2861 * evaluate the size of the next DEL/RESTORE operation.
2863 * Note that we update the registry itself during command(s) handling
2864 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2865 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2866 * with a limited amount of update commands (per MAC/bin) and we don't
2867 * know in this scope what the actual state of bins configuration is
2868 * going to be after this ramrod.
2870 if (!o->total_pending_num)
2871 ecore_mcast_refresh_registry_e2(o);
2873 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2874 * RAMROD_PENDING status immediately.
2876 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2877 raw->clear_pending(raw);
2878 return ECORE_SUCCESS;
2880 /* No need for an explicit memory barrier here: the only ordering we must
2881 * ensure is that the SPQ element is written before the SPQ producer is
2882 * updated, and since the producer update involves a memory read, a full
2883 * memory barrier is already put there (inside ecore_sp_post()). */
2888 rc = ecore_sp_post(sc,
2889 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2891 raw->rdata_mapping, ETH_CONNECTION_TYPE);
2895 /* Ramrod completion is pending */
2896 return ECORE_PENDING;
2900 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2901 struct ecore_mcast_ramrod_params *p,
2902 enum ecore_mcast_cmd cmd)
2904 /* Mark, that there is a work to do */
2905 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2906 p->mcast_list_len = 1;
2908 return ECORE_SUCCESS;
2911 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2912 __rte_unused struct ecore_mcast_ramrod_params
2913 *p, __rte_unused int old_num_bins)
2918 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2919 do { \
2920 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2921 } while (0)
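/* Illustrative note, not part of the driver itself: on 57711 the approximate
 * multicast filter written to TSTORM RAM is an array of ECORE_MC_HASH_SIZE
 * 32-bit words treated as one long bitmap, so bin b is bit (b & 0x1f) of
 * word (b >> 5).  For example, bin 200 lands in word 6 (200 / 32) as bit 8
 * (200 % 32), i.e. mc_filter[6] |= 1 << 8.
 */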
2923 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2924 struct ecore_mcast_obj *o,
2925 struct ecore_mcast_ramrod_params *p,
2926 uint32_t * mc_filter)
2928 struct ecore_mcast_list_elem *mlist_pos;
2931 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2932 struct ecore_mcast_list_elem) {
2933 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2934 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2936 ECORE_MSG
2937 ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2938 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2939 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2940 bit);
2942 /* bookkeeping... */
2943 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2947 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2948 __rte_unused,
2949 struct ecore_mcast_obj *o,
2950 uint32_t * mc_filter)
2954 for (bit = ecore_mcast_get_next_bin(o, 0);
2955 bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2956 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2957 ECORE_MSG("About to set bin %d", bit);
2961 /* On 57711 we write the multicast MACs' approximate match
2962 * table directly into the TSTORM's internal RAM, so we don't
2963 * really need any tricks to make it work.
2965 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2966 struct ecore_mcast_ramrod_params *p,
2967 enum ecore_mcast_cmd cmd)
2970 struct ecore_mcast_obj *o = p->mcast_obj;
2971 struct ecore_raw_obj *r = &o->raw;
2973 /* If CLEAR_ONLY has been requested - clear the registry
2974 * and clear a pending bit.
2976 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2977 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2979 /* Set the multicast filter bits before writing it into
2980 * the internal memory.
2983 case ECORE_MCAST_CMD_ADD:
2984 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2987 case ECORE_MCAST_CMD_DEL:
2988 ECORE_MSG("Invalidating multicast MACs configuration");
2990 /* clear the registry */
2991 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2992 sizeof(o->registry.aprox_match.vec));
2995 case ECORE_MCAST_CMD_RESTORE:
2996 ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
3000 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
3004 /* Set the mcast filter in the internal memory */
3005 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3006 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3007 } else {
3008 /* clear the registry */
3009 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3010 sizeof(o->registry.aprox_match.vec));
3013 r->clear_pending(r);
3015 return ECORE_SUCCESS;
3018 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3020 return o->registry.aprox_match.num_bins_set;
3023 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3026 o->registry.aprox_match.num_bins_set = n;
3029 int ecore_config_mcast(struct bnx2x_softc *sc,
3030 struct ecore_mcast_ramrod_params *p,
3031 enum ecore_mcast_cmd cmd)
3033 struct ecore_mcast_obj *o = p->mcast_obj;
3034 struct ecore_raw_obj *r = &o->raw;
3035 int rc = 0, old_reg_size;
3037 /* This is needed to recover number of currently configured mcast macs
3038 * in case of failure.
3040 old_reg_size = o->get_registry_size(o);
3042 /* Do some calculations and checks */
3043 rc = o->validate(sc, p, cmd);
3047 /* Return if there is no work to do */
3048 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3049 return ECORE_SUCCESS;
3051 ECORE_MSG
3052 ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3053 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3055 /* Enqueue the current command to the pending list if we can't complete
3056 * it in the current iteration
3058 if (r->check_pending(r) ||
3059 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3060 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3064 /* As long as the current command is in a command list we
3065 * don't need to handle it separately.
3067 p->mcast_list_len = 0;
3070 if (!r->check_pending(r)) {
3072 /* Set 'pending' state */
3073 r->set_pending(r);
3075 /* Configure the new classification in the chip */
3076 rc = o->config_mcast(sc, p, cmd);
3080 /* Wait for a ramrod completion if it was requested */
3081 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3082 rc = o->wait_comp(sc, o);
3088 r->clear_pending(r);
3091 o->revert(sc, p, old_reg_size);
3096 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3098 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3099 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3100 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3103 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3105 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3106 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3107 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3110 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3112 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3115 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3117 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3120 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3121 struct ecore_mcast_obj *mcast_obj,
3122 uint8_t mcast_cl_id, uint32_t mcast_cid,
3123 uint8_t func_id, uint8_t engine_id, void *rdata,
3124 ecore_dma_addr_t rdata_mapping, int state,
3125 unsigned long *pstate, ecore_obj_type type)
3127 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3129 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3130 rdata, rdata_mapping, state, pstate, type);
3132 mcast_obj->engine_id = engine_id;
3134 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3136 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3137 mcast_obj->check_sched = ecore_mcast_check_sched;
3138 mcast_obj->set_sched = ecore_mcast_set_sched;
3139 mcast_obj->clear_sched = ecore_mcast_clear_sched;
3141 if (CHIP_IS_E1H(sc)) {
3142 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3143 mcast_obj->enqueue_cmd = NULL;
3144 mcast_obj->hdl_restore = NULL;
3145 mcast_obj->check_pending = ecore_mcast_check_pending;
3147 /* 57711 doesn't send a ramrod, so it has unlimited credit for one command. */
3150 mcast_obj->max_cmd_len = -1;
3151 mcast_obj->wait_comp = ecore_mcast_wait;
3152 mcast_obj->set_one_rule = NULL;
3153 mcast_obj->validate = ecore_mcast_validate_e1h;
3154 mcast_obj->revert = ecore_mcast_revert_e1h;
3155 mcast_obj->get_registry_size =
3156 ecore_mcast_get_registry_size_aprox;
3157 mcast_obj->set_registry_size =
3158 ecore_mcast_set_registry_size_aprox;
3159 } else {
3160 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3161 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3162 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3163 mcast_obj->check_pending = ecore_mcast_check_pending;
3164 mcast_obj->max_cmd_len = 16;
3165 mcast_obj->wait_comp = ecore_mcast_wait;
3166 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3167 mcast_obj->validate = ecore_mcast_validate_e2;
3168 mcast_obj->revert = ecore_mcast_revert_e2;
3169 mcast_obj->get_registry_size =
3170 ecore_mcast_get_registry_size_aprox;
3171 mcast_obj->set_registry_size =
3172 ecore_mcast_set_registry_size_aprox;
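/* Illustrative usage sketch, not part of the driver itself: once the object
 * is initialized, multicast MACs are programmed by building a list of
 * ecore_mcast_list_elem entries and issuing an ADD command; with
 * RAMROD_COMP_WAIT the call waits for the rules ramrod (57712+) to complete.
 * The softc member name is an assumption:
 *
 *	struct ecore_mcast_ramrod_params p = { 0 };
 *
 *	p.mcast_obj = &sc->mcast_obj;	(assumed softc member)
 *	(p.mcast_list is populated with ecore_mcast_list_elem entries and
 *	 p.mcast_list_len set to their count beforehand)
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_mcast(sc, &p, ECORE_MCAST_CMD_ADD);
 */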
3176 /*************************** Credit handling **********************************/
3179 * atomic_add_ifless - add if the result is less than a given value.
3181 * @v: pointer of type ecore_atomic_t
3182 * @a: the amount to add to v...
3183 * @u: ...if (v + a) is less than u.
3185 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3188 static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u)
3190 int c, old;
3192 c = ECORE_ATOMIC_READ(v);
3193 for (;;) {
3194 if (ECORE_UNLIKELY(c + a >= u))
3195 return FALSE;
3197 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3198 if (ECORE_LIKELY(old == c))
3199 break;
3200 c = old;
3203 return TRUE;
3207 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3209 * @v: pointer of type ecore_atomic_t
3210 * @a: the amount to dec from v...
3211 * @u: ...if (v - a) is greater than or equal to u.
3213 * returns TRUE if (v - a) was greater than or equal to u, and FALSE otherwise.
3216 static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u)
3218 int c, old;
3220 c = ECORE_ATOMIC_READ(v);
3221 for (;;) {
3222 if (ECORE_UNLIKELY(c - a < u))
3223 return FALSE;
3225 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3226 if (ECORE_LIKELY(old == c))
3227 break;
3228 c = old;
3231 return TRUE;
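/* Illustrative note, not part of the driver itself: both helpers are
 * lock-free compare-and-swap retry loops - read the current value, bail out
 * if the bounded add/sub would cross the limit, otherwise try to publish the
 * new value and retry from the value another CPU raced in.  The same pattern
 * with C11 atomics, as a standalone sketch:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static bool add_ifless(atomic_int *v, int a, int u)
 *	{
 *		int c = atomic_load(v);
 *
 *		while (c + a < u) {
 *			if (atomic_compare_exchange_weak(v, &c, c + a))
 *				return true;	(c is reloaded on failure)
 *		}
 *		return false;
 *	}
 */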
3234 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3239 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3245 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3251 /* Don't allow refilling if credit + cnt > pool_sz */
3252 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3259 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3264 cur_credit = ECORE_ATOMIC_READ(&o->credit);
3269 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3270 ecore_credit_pool_obj *o,
3271 __rte_unused int cnt)
3276 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3283 /* Find "internal cam-offset" then add to base for this object... */
3284 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3286 /* Skip the current vector if there are no free entries in it */
3287 if (!o->pool_mirror[vec])
3290 /* If we've got here we are going to find a free entry */
3291 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3292 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3294 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3296 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3297 *offset = o->base_pool_offset + idx;
3298 return TRUE;
3302 return FALSE;
3305 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3308 if (offset < o->base_pool_offset)
3309 return FALSE;
3311 offset -= o->base_pool_offset;
3313 if (offset >= o->pool_sz)
3314 return FALSE;
3316 /* Return the entry to the pool */
3317 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3319 return TRUE;
3322 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3323 ecore_credit_pool_obj *o,
3324 __rte_unused int offset)
3329 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3330 ecore_credit_pool_obj *o,
3331 __rte_unused int *offset)
3338 * ecore_init_credit_pool - initialize credit pool internals.
3341 * @base: Base entry in the CAM to use.
3342 * @credit: pool size.
3344 * If base is negative no CAM entries handling will be performed.
3345 * If credit is negative pool operations will always succeed (unlimited pool).
3348 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3349 int base, int credit)
3351 /* Zero the object first */
3352 ECORE_MEMSET(p, 0, sizeof(*p));
3354 /* Set the table to all 1s */
3355 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3357 /* Init a pool as full */
3358 ECORE_ATOMIC_SET(&p->credit, credit);
3360 /* The total pool size */
3361 p->pool_sz = credit;
3363 p->base_pool_offset = base;
3365 /* Commit the change */
3368 p->check = ecore_credit_pool_check;
3370 /* If pool credit is negative - disable the checks */
3371 if (credit >= 0) {
3372 p->put = ecore_credit_pool_put;
3373 p->get = ecore_credit_pool_get;
3374 p->put_entry = ecore_credit_pool_put_entry;
3375 p->get_entry = ecore_credit_pool_get_entry;
3376 } else {
3377 p->put = ecore_credit_pool_always_TRUE;
3378 p->get = ecore_credit_pool_always_TRUE;
3379 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3380 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3383 /* If base is negative - disable entries handling */
3384 if (base < 0) {
3385 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3386 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
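/* Illustrative usage sketch, not part of the driver itself: a classification
 * object consumes CAM credit before adding a MAC and returns it on deletion.
 * Names outside this file (cam_offset, the error path) are assumptions:
 *
 *	if (!p->get(p, 1))
 *		return ECORE_NOMEM;	(no CAM credit left)
 *	if (!p->get_entry(p, &cam_offset)) {	(only meaningful when base >= 0)
 *		p->put(p, 1);
 *		return ECORE_NOMEM;
 *	}
 *	... program the MAC at cam_offset ...
 *	and on removal:
 *	p->put_entry(p, cam_offset);
 *	p->put(p, 1);
 */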
3390 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3391 struct ecore_credit_pool_obj *p,
3392 uint8_t func_id, uint8_t func_num)
3395 #define ECORE_CAM_SIZE_EMUL 5
3397 int cam_sz;
3399 if (CHIP_IS_E1H(sc)) {
3400 /* CAM credit is equally divided between all active functions
3401 * on the port. */
3403 if (func_num > 0) {
3404 if (!CHIP_REV_IS_SLOW(sc))
3405 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3406 else
3407 cam_sz = ECORE_CAM_SIZE_EMUL;
3408 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3409 } else {
3410 /* this should never happen! Block MAC operations. */
3411 ecore_init_credit_pool(p, 0, 0);
3414 } else {
3416 /*
3417 * CAM credit is equally divided between all active functions
3418 * on the path. */
3420 if (func_num > 0) {
3421 if (!CHIP_REV_IS_SLOW(sc))
3422 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3423 else
3424 cam_sz = ECORE_CAM_SIZE_EMUL;
3426 /* No need for CAM entries handling for 57712 and
3427 * newer. */
3429 ecore_init_credit_pool(p, -1, cam_sz);
3430 } else {
3431 /* this should never happen! Block MAC operations. */
3432 ecore_init_credit_pool(p, 0, 0);
3437 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3438 struct ecore_credit_pool_obj *p,
3439 uint8_t func_id, uint8_t func_num)
3441 if (CHIP_IS_E1x(sc)) {
3442 /* There is no VLAN credit in HW on 57711: only MAC / MAC-VLAN
3443 * can be set. */
3445 ecore_init_credit_pool(p, 0, -1);
3446 } else {
3447 /* CAM credit is equally divided between all active functions
3448 * on the path. */
3450 if (func_num > 0) {
3451 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3452 ecore_init_credit_pool(p, func_id * credit, credit);
3453 } else {
3454 /* this should never happen! Block VLAN operations. */
3455 ecore_init_credit_pool(p, 0, 0);
3459 /****************** RSS Configuration ******************/
3462 * ecore_setup_rss - configure RSS
3464 * @sc: device handle
3465 * @p: rss configuration
3467 * Sends an UPDATE ramrod for that matter.
3469 static int ecore_setup_rss(struct bnx2x_softc *sc,
3470 struct ecore_config_rss_params *p)
3472 struct ecore_rss_config_obj *o = p->rss_obj;
3473 struct ecore_raw_obj *r = &o->raw;
3474 struct eth_rss_update_ramrod_data *data =
3475 (struct eth_rss_update_ramrod_data *)(r->rdata);
3476 uint8_t rss_mode = 0;
3479 ECORE_MEMSET(data, 0, sizeof(*data));
3481 ECORE_MSG("Configuring RSS");
3483 /* Set an echo field */
3484 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3485 (r->state << ECORE_SWCID_SHIFT));
3488 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3489 rss_mode = ETH_RSS_MODE_DISABLED;
3490 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3491 rss_mode = ETH_RSS_MODE_REGULAR;
3493 data->rss_mode = rss_mode;
3495 ECORE_MSG("rss_mode=%d", rss_mode);
3497 /* RSS capabilities */
3498 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3499 data->capabilities |=
3500 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3502 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3503 data->capabilities |=
3504 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3506 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3507 data->capabilities |=
3508 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3510 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3511 data->capabilities |=
3512 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3514 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3515 data->capabilities |=
3516 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3518 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3519 data->capabilities |=
3520 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3522 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3523 data->udp_4tuple_dst_port_mask =
3524 ECORE_CPU_TO_LE16(p->tunnel_mask);
3525 data->udp_4tuple_dst_port_value =
3526 ECORE_CPU_TO_LE16(p->tunnel_value);
3530 data->rss_result_mask = p->rss_result_mask;
3533 data->rss_engine_id = o->engine_id;
3535 ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
3537 /* Indirection table */
3538 ECORE_MEMCPY(data->indirection_table, p->ind_table,
3539 T_ETH_INDIRECTION_TABLE_SIZE);
3541 /* Remember the last configuration */
3542 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3545 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3546 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3547 sizeof(data->rss_key));
3548 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3551 /* No need for an explicit memory barrier here: the only ordering we must
3552 * ensure is that the SPQ element is written before the SPQ producer is
3553 * updated, and since the producer update involves a memory read, a full
3554 * memory barrier is already put there (inside ecore_sp_post()). */
3559 rc = ecore_sp_post(sc,
3560 RAMROD_CMD_ID_ETH_RSS_UPDATE,
3561 r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3566 return ECORE_PENDING;
3569 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3572 struct ecore_rss_config_obj *o = p->rss_obj;
3573 struct ecore_raw_obj *r = &o->raw;
3575 /* Do nothing if only driver cleanup was requested */
3576 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3577 return ECORE_SUCCESS;
3579 r->set_pending(r);
3581 rc = o->config_rss(sc, p);
3582 if (rc < 0) {
3583 r->clear_pending(r);
3584 return rc;
3587 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3588 rc = r->wait_comp(sc, r);
3590 return rc;
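/* Illustrative usage sketch, not part of the driver itself: a typical
 * regular-RSS configuration fills the flags, the result mask, the key and
 * the indirection table and then waits for completion.  Names outside this
 * file (the softc member, the chosen mask value) are assumptions:
 *
 *	struct ecore_config_rss_params p = { 0 };
 *
 *	p.rss_obj = &sc->rss_conf_obj;	(assumed softc member)
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &p.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &p.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &p.rss_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	p.rss_result_mask = 0x7f;	(assumed 128-entry hash mask)
 *	(fill p.ind_table[] and, with ECORE_RSS_SET_SRCH, p.rss_key[])
 *	rc = ecore_config_rss(sc, &p);
 */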
3593 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3594 uint8_t cl_id, uint32_t cid, uint8_t func_id,
3595 uint8_t engine_id, void *rdata,
3596 ecore_dma_addr_t rdata_mapping, int state,
3597 unsigned long *pstate, ecore_obj_type type)
3599 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3600 rdata_mapping, state, pstate, type);
3602 rss_obj->engine_id = engine_id;
3603 rss_obj->config_rss = ecore_setup_rss;
3606 /********************** Queue state object ***********************************/
3609 * ecore_queue_state_change - perform Queue state change transition
3611 * @sc: device handle
3612 * @params: parameters to perform the transition
3614 * returns 0 in case of successfully completed transition, negative error
3615 * code in case of failure, positive (EBUSY) value if there is a completion
3616 * that is still pending (possible only if RAMROD_COMP_WAIT is
3617 * not set in params->ramrod_flags for asynchronous commands).
3620 int ecore_queue_state_change(struct bnx2x_softc *sc,
3621 struct ecore_queue_state_params *params)
3623 struct ecore_queue_sp_obj *o = params->q_obj;
3624 int rc, pending_bit;
3625 unsigned long *pending = &o->pending;
3627 /* Check that the requested transition is legal */
3628 rc = o->check_transition(sc, o, params);
3629 if (rc) {
3630 PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
3631 rc);
3632 return ECORE_INVAL;
3635 /* Set "pending" bit */
3636 ECORE_MSG("pending bit was=%lx", o->pending);
3637 pending_bit = o->set_pending(o, params);
3638 ECORE_MSG("pending bit now=%lx", o->pending);
3640 /* Don't send a command if only driver cleanup was requested */
3641 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3642 o->complete_cmd(sc, o, pending_bit);
3643 else {
3644 /* Send a ramrod */
3645 rc = o->send_cmd(sc, params);
3646 if (rc) {
3647 o->next_state = ECORE_Q_STATE_MAX;
3648 ECORE_CLEAR_BIT(pending_bit, pending);
3649 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3650 return rc;
3653 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3654 rc = o->wait_comp(sc, o, pending_bit);
3655 if (rc)
3656 return rc;
3658 return ECORE_SUCCESS;
3662 return ECORE_RET_PENDING(pending_bit, pending);
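/* Illustrative usage sketch, not part of the driver itself: every queue
 * transition goes through this single entry point - the caller picks a
 * command, optionally asks to wait, and lets the state machine validate it.
 * For example, halting a queue synchronously (the per-queue object name is
 * an assumption):
 *
 *	struct ecore_queue_state_params q_params = { 0 };
 *
 *	q_params.q_obj = &fp->q_obj;	(assumed per-queue object)
 *	q_params.cmd = ECORE_Q_CMD_HALT;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = ecore_queue_state_change(sc, &q_params);
 */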
3665 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3666 struct ecore_queue_state_params *params)
3668 enum ecore_queue_cmd cmd = params->cmd, bit;
3670 /* ACTIVATE and DEACTIVATE commands are implemented on top of the UPDATE command. */
3673 if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3674 bit = ECORE_Q_CMD_UPDATE;
3675 else
3676 bit = cmd;
3678 ECORE_SET_BIT(bit, &obj->pending);
3680 return bit;
3682 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3683 struct ecore_queue_sp_obj *o,
3684 enum ecore_queue_cmd cmd)
3686 return ecore_state_wait(sc, cmd, &o->pending);
3690 * ecore_queue_comp_cmd - complete the state change command.
3692 * @sc: device handle
3696 * Checks that the arrived completion is expected.
3698 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3699 struct ecore_queue_sp_obj *o,
3700 enum ecore_queue_cmd cmd)
3702 unsigned long cur_pending = o->pending;
3704 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3705 PMD_DRV_LOG(ERR,
3706 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3707 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3708 cur_pending, o->next_state);
3710 return ECORE_INVAL;
3712 if (o->next_tx_only >= o->max_cos)
3713 /* >= because tx only must always be smaller than cos since the
3714 * primary connection supports COS 0. */
3716 PMD_DRV_LOG(ERR,
3717 "illegal value for next tx_only: %d. max cos was %d",
3718 o->next_tx_only, o->max_cos);
3720 ECORE_MSG("Completing command %d for queue %d, setting state to %d",
3721 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3723 if (o->next_tx_only) /* print num tx-only if any exist */
3724 ECORE_MSG("primary cid %d: num tx-only cons %d",
3725 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3727 o->state = o->next_state;
3728 o->num_tx_only = o->next_tx_only;
3729 o->next_state = ECORE_Q_STATE_MAX;
3731 /* It's important that o->state and o->next_state are
3732 * updated before o->pending.
3736 ECORE_CLEAR_BIT(cmd, &o->pending);
3737 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3739 return ECORE_SUCCESS;
3742 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3743 *cmd_params,
3744 struct client_init_ramrod_data *data)
3746 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3750 /* IPv6 TPA supported for E2 and above only */
3751 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3752 &params->flags) *
3753 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3756 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3757 struct ecore_queue_sp_obj *o,
3758 struct ecore_general_setup_params
3759 *params, struct client_init_general_data
3760 *gen_data, unsigned long *flags)
3762 gen_data->client_id = o->cl_id;
3764 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3765 gen_data->statistics_counter_id = params->stat_id;
3766 gen_data->statistics_en_flg = 1;
3767 gen_data->statistics_zero_flg =
3768 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3770 gen_data->statistics_counter_id =
3771 DISABLE_STATISTIC_COUNTER_ID_VALUE;
3773 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3774 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3775 gen_data->sp_client_id = params->spcl_id;
3776 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3777 gen_data->func_id = o->func_id;
3779 gen_data->cos = params->cos;
3781 gen_data->traffic_type =
3782 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3783 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3785 ECORE_MSG("flags: active %d, cos %d, stats en %d",
3786 gen_data->activate_flg, gen_data->cos,
3787 gen_data->statistics_en_flg);
3790 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3791 struct client_init_tx_data *tx_data,
3792 unsigned long *flags)
3794 tx_data->enforce_security_flg =
3795 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3796 tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3797 tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3798 tx_data->tx_switching_flg =
3799 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3800 tx_data->anti_spoofing_flg =
3801 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3802 tx_data->force_default_pri_flg =
3803 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3804 tx_data->refuse_outband_vlan_flg =
3805 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3806 tx_data->tunnel_non_lso_pcsum_location =
3807 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3808 CSUM_ON_BD;
3810 tx_data->tx_status_block_id = params->fw_sb_id;
3811 tx_data->tx_sb_index_number = params->sb_cq_index;
3812 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3814 tx_data->tx_bd_page_base.lo =
3815 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3816 tx_data->tx_bd_page_base.hi =
3817 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3819 /* Don't configure any Tx switching mode during queue SETUP */
3823 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3824 struct client_init_rx_data *rx_data)
3826 /* flow control data */
3827 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3828 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3829 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3830 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3831 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3832 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3833 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3836 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3837 struct client_init_rx_data *rx_data,
3838 unsigned long *flags)
3840 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3841 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3842 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3843 CLIENT_INIT_RX_DATA_TPA_MODE;
3844 rx_data->vmqueue_mode_en_flg = 0;
3846 rx_data->extra_data_over_sgl_en_flg =
3847 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3848 rx_data->cache_line_alignment_log_size = params->cache_line_log;
3849 rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3850 rx_data->client_qzone_id = params->cl_qzone_id;
3851 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3853 /* Always start in DROP_ALL mode */
3854 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3855 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3857 /* We don't set drop flags */
3858 rx_data->drop_ip_cs_err_flg = 0;
3859 rx_data->drop_tcp_cs_err_flg = 0;
3860 rx_data->drop_ttl0_flg = 0;
3861 rx_data->drop_udp_cs_err_flg = 0;
3862 rx_data->inner_vlan_removal_enable_flg =
3863 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3864 rx_data->outer_vlan_removal_enable_flg =
3865 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3866 rx_data->status_block_id = params->fw_sb_id;
3867 rx_data->rx_sb_index_number = params->sb_cq_index;
3868 rx_data->max_tpa_queues = params->max_tpa_queues;
3869 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3870 rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3871 rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3872 rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3873 rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3874 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3875 flags);
3877 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3878 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3879 rx_data->is_approx_mcast = 1;
3882 rx_data->rss_engine_id = params->rss_engine_id;
3884 /* silent vlan removal */
3885 rx_data->silent_vlan_removal_flg =
3886 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3887 rx_data->silent_vlan_value =
3888 ECORE_CPU_TO_LE16(params->silent_removal_value);
3889 rx_data->silent_vlan_mask =
3890 ECORE_CPU_TO_LE16(params->silent_removal_mask);
3893 /* initialize the general, tx and rx parts of a queue object */
3894 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3895 *cmd_params,
3896 struct client_init_ramrod_data *data)
3898 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3899 &cmd_params->params.setup.gen_params,
3900 &data->general,
3901 &cmd_params->params.setup.flags);
3903 ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3904 &data->tx, &cmd_params->params.setup.flags);
3906 ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3907 &data->rx, &cmd_params->params.setup.flags);
3909 ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3910 &data->rx);
3913 /* initialize the general and tx parts of a tx-only queue object */
3914 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3915 *cmd_params,
3916 struct tx_queue_init_ramrod_data *data)
3918 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3919 &cmd_params->params.tx_only.gen_params,
3920 &data->general,
3921 &cmd_params->params.tx_only.flags);
3923 ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3924 &data->tx, &cmd_params->params.tx_only.flags);
3926 ECORE_MSG("cid %d, tx bd page lo %x hi %x",
3927 cmd_params->q_obj->cids[0],
3928 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3932 * ecore_q_init - init HW/FW queue
3934 * @sc: device handle
3937 * HW/FW initial Queue configuration:
3939 * - CDU context validation
3942 static int ecore_q_init(struct bnx2x_softc *sc,
3943 struct ecore_queue_state_params *params)
3945 struct ecore_queue_sp_obj *o = params->q_obj;
3946 struct ecore_queue_init_params *init = &params->params.init;
3950 /* Tx HC configuration */
3951 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3952 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3953 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3955 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3956 init->tx.sb_cq_index,
3957 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN,
3958 &init->tx.flags), hc_usec);
3962 /* Rx HC configuration */
3963 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3964 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3965 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3967 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3968 init->rx.sb_cq_index,
3969 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN,
3970 &init->rx.flags), hc_usec);
3974 /* Set CDU context validation values */
3975 for (cos = 0; cos < o->max_cos; cos++) {
3976 ECORE_MSG("setting context validation. cid %d, cos %d",
3977 o->cids[cos], cos);
3978 ECORE_MSG("context pointer %p", init->cxts[cos]);
3979 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3982 /* As no ramrod is sent, complete the command immediately */
3983 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3988 return ECORE_SUCCESS;
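/* Illustrative note, not part of the driver itself: hc_rate is an
 * interrupt-coalescing rate in interrupts/second and the status-block index
 * is programmed with the equivalent period in microseconds,
 * hc_usec = 1000000 / hc_rate.  For example a requested rate of 5000 int/s
 * becomes a 200 us coalescing period, and hc_rate == 0 is passed through as
 * 0 (coalescing effectively disabled for that index).
 */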
3991 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3992 *params)
3994 struct ecore_queue_sp_obj *o = params->q_obj;
3995 struct client_init_ramrod_data *rdata =
3996 (struct client_init_ramrod_data *)o->rdata;
3997 ecore_dma_addr_t data_mapping = o->rdata_mapping;
3998 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4000 /* Clear the ramrod data */
4001 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4003 /* Fill the ramrod data */
4004 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4006 /* No need for an explicit memory barrier here: the only ordering we must
4007 * ensure is that the SPQ element is written before the SPQ producer is
4008 * updated, and since the producer update involves a memory read, a full
4009 * memory barrier is already put there (inside ecore_sp_post()). */
4013 return ecore_sp_post(sc,
4015 o->cids[ECORE_PRIMARY_CID_INDEX],
4016 data_mapping, ETH_CONNECTION_TYPE);
4019 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4020 struct ecore_queue_state_params *params)
4022 struct ecore_queue_sp_obj *o = params->q_obj;
4023 struct client_init_ramrod_data *rdata =
4024 (struct client_init_ramrod_data *)o->rdata;
4025 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4026 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4028 /* Clear the ramrod data */
4029 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4031 /* Fill the ramrod data */
4032 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4033 ecore_q_fill_setup_data_e2(params, rdata);
4035 /* No need for an explicit memory barrier here: the only ordering we must
4036 * ensure is that the SPQ element is written before the SPQ producer is
4037 * updated, and since the producer update involves a memory read, a full
4038 * memory barrier is already put there (inside ecore_sp_post()). */
4042 return ecore_sp_post(sc,
4044 o->cids[ECORE_PRIMARY_CID_INDEX],
4045 data_mapping, ETH_CONNECTION_TYPE);
4048 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4049 *params)
4051 struct ecore_queue_sp_obj *o = params->q_obj;
4052 struct tx_queue_init_ramrod_data *rdata =
4053 (struct tx_queue_init_ramrod_data *)o->rdata;
4054 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4055 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4056 struct ecore_queue_setup_tx_only_params *tx_only_params =
4057 &params->params.tx_only;
4058 uint8_t cid_index = tx_only_params->cid_index;
4060 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
4061 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4062 ECORE_MSG("sending forward tx-only ramrod");
4064 if (cid_index >= o->max_cos) {
4065 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4066 o->cl_id, cid_index);
4070 ECORE_MSG("parameters received: cos: %d sp-id: %d",
4071 tx_only_params->gen_params.cos,
4072 tx_only_params->gen_params.spcl_id);
4074 /* Clear the ramrod data */
4075 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4077 /* Fill the ramrod data */
4078 ecore_q_fill_setup_tx_only(sc, params, rdata);
4080 ECORE_MSG
4081 ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4082 o->cids[cid_index], rdata->general.client_id,
4083 rdata->general.sp_client_id, rdata->general.cos);
4085 /* No need for an explicit memory barrier here: the only ordering we must
4086 * ensure is that the SPQ element is written before the SPQ producer is
4087 * updated, and since the producer update involves a memory read, a full
4088 * memory barrier is already put there (inside ecore_sp_post()). */
4092 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4093 data_mapping, ETH_CONNECTION_TYPE);
4096 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4097 struct ecore_queue_update_params *params,
4098 struct client_update_ramrod_data *data)
4100 /* Client ID of the client to update */
4101 data->client_id = obj->cl_id;
4103 /* Function ID of the client to update */
4104 data->func_id = obj->func_id;
4106 /* Default VLAN value */
4107 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4109 /* Inner VLAN stripping */
4110 data->inner_vlan_removal_enable_flg =
4111 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4112 data->inner_vlan_removal_change_flg =
4113 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4114 &params->update_flags);
4116 /* Outer VLAN stripping */
4117 data->outer_vlan_removal_enable_flg =
4118 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4119 data->outer_vlan_removal_change_flg =
4120 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4121 &params->update_flags);
4123 /* Drop packets that have a source MAC that doesn't belong to this
4124 * Queue. */
4126 data->anti_spoofing_enable_flg =
4127 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4128 data->anti_spoofing_change_flg =
4129 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4130 &params->update_flags);
4132 /* Activate/Deactivate */
4133 data->activate_flg =
4134 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4135 data->activate_change_flg =
4136 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4138 /* Enable default VLAN */
4139 data->default_vlan_enable_flg =
4140 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4141 data->default_vlan_change_flg =
4142 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4143 &params->update_flags);
4145 /* silent vlan removal */
4146 data->silent_vlan_change_flg =
4147 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4148 &params->update_flags);
4149 data->silent_vlan_removal_flg =
4150 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4151 &params->update_flags);
4152 data->silent_vlan_value =
4153 ECORE_CPU_TO_LE16(params->silent_removal_value);
4154 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4157 data->tx_switching_flg =
4158 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4159 data->tx_switching_change_flg =
4160 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4161 &params->update_flags);
4164 static int ecore_q_send_update(struct bnx2x_softc *sc,
4165 struct ecore_queue_state_params *params)
4167 struct ecore_queue_sp_obj *o = params->q_obj;
4168 struct client_update_ramrod_data *rdata =
4169 (struct client_update_ramrod_data *)o->rdata;
4170 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4171 struct ecore_queue_update_params *update_params =
4172 &params->params.update;
4173 uint8_t cid_index = update_params->cid_index;
4175 if (cid_index >= o->max_cos) {
4176 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4177 o->cl_id, cid_index);
4181 /* Clear the ramrod data */
4182 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4184 /* Fill the ramrod data */
4185 ecore_q_fill_update_data(o, update_params, rdata);
4187 /* No need for an explicit memory barrier here: the only ordering we must
4188 * ensure is that the SPQ element is written before the SPQ producer is
4189 * updated, and since the producer update involves a memory read, a full
4190 * memory barrier is already put there (inside ecore_sp_post()). */
4194 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4195 o->cids[cid_index], data_mapping,
4196 ETH_CONNECTION_TYPE);
4200 * ecore_q_send_deactivate - send DEACTIVATE command
4202 * @sc: device handle
4205 * implemented using the UPDATE command.
4207 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4208 *params)
4210 struct ecore_queue_update_params *update = &params->params.update;
4212 ECORE_MEMSET(update, 0, sizeof(*update));
4214 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4216 return ecore_q_send_update(sc, params);
4220 * ecore_q_send_activate - send ACTIVATE command
4222 * @sc: device handle
4225 * implemented using the UPDATE command.
4227 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4228 struct ecore_queue_state_params *params)
4230 struct ecore_queue_update_params *update = &params->params.update;
4232 ECORE_MEMSET(update, 0, sizeof(*update));
4234 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4235 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4237 return ecore_q_send_update(sc, params);
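/* Illustrative note, not part of the driver itself: ACTIVATE/DEACTIVATE do
 * not have ramrods of their own; they are expressed as an UPDATE that
 * toggles only the activate bit, which is why both helpers set
 * ECORE_Q_UPDATE_ACTIVATE_CHNG (the "this field changed" mask) and only
 * ACTIVATE also sets ECORE_Q_UPDATE_ACTIVATE (the new value).
 * ecore_queue_set_pending() above maps both commands onto the pending bit of
 * ECORE_Q_CMD_UPDATE for the same reason.
 */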
4240 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4241 __rte_unused struct
4242 ecore_queue_state_params *params)
4244 /* Not implemented yet. */
4248 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4249 struct ecore_queue_state_params *params)
4251 struct ecore_queue_sp_obj *o = params->q_obj;
4253 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4254 ecore_dma_addr_t data_mapping = 0;
4255 data_mapping = (ecore_dma_addr_t) o->cl_id;
4257 return ecore_sp_post(sc,
4258 RAMROD_CMD_ID_ETH_HALT,
4259 o->cids[ECORE_PRIMARY_CID_INDEX],
4260 data_mapping, ETH_CONNECTION_TYPE);
4263 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4264 struct ecore_queue_state_params *params)
4266 struct ecore_queue_sp_obj *o = params->q_obj;
4267 uint8_t cid_idx = params->params.cfc_del.cid_index;
4269 if (cid_idx >= o->max_cos) {
4270 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4275 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4276 o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4279 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4282 struct ecore_queue_sp_obj *o = params->q_obj;
4283 uint8_t cid_index = params->params.terminate.cid_index;
4285 if (cid_index >= o->max_cos) {
4286 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4287 o->cl_id, cid_index);
4291 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4292 o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4295 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4296 struct ecore_queue_state_params *params)
4298 struct ecore_queue_sp_obj *o = params->q_obj;
4300 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4301 o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4302 ETH_CONNECTION_TYPE);
4305 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4308 switch (params->cmd) {
4309 case ECORE_Q_CMD_INIT:
4310 return ecore_q_init(sc, params);
4311 case ECORE_Q_CMD_SETUP_TX_ONLY:
4312 return ecore_q_send_setup_tx_only(sc, params);
4313 case ECORE_Q_CMD_DEACTIVATE:
4314 return ecore_q_send_deactivate(sc, params);
4315 case ECORE_Q_CMD_ACTIVATE:
4316 return ecore_q_send_activate(sc, params);
4317 case ECORE_Q_CMD_UPDATE:
4318 return ecore_q_send_update(sc, params);
4319 case ECORE_Q_CMD_UPDATE_TPA:
4320 return ecore_q_send_update_tpa(sc, params);
4321 case ECORE_Q_CMD_HALT:
4322 return ecore_q_send_halt(sc, params);
4323 case ECORE_Q_CMD_CFC_DEL:
4324 return ecore_q_send_cfc_del(sc, params);
4325 case ECORE_Q_CMD_TERMINATE:
4326 return ecore_q_send_terminate(sc, params);
4327 case ECORE_Q_CMD_EMPTY:
4328 return ecore_q_send_empty(sc, params);
4330 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4335 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4336 struct ecore_queue_state_params *params)
4338 switch (params->cmd) {
4339 case ECORE_Q_CMD_SETUP:
4340 return ecore_q_send_setup_e1x(sc, params);
4341 case ECORE_Q_CMD_INIT:
4342 case ECORE_Q_CMD_SETUP_TX_ONLY:
4343 case ECORE_Q_CMD_DEACTIVATE:
4344 case ECORE_Q_CMD_ACTIVATE:
4345 case ECORE_Q_CMD_UPDATE:
4346 case ECORE_Q_CMD_UPDATE_TPA:
4347 case ECORE_Q_CMD_HALT:
4348 case ECORE_Q_CMD_CFC_DEL:
4349 case ECORE_Q_CMD_TERMINATE:
4350 case ECORE_Q_CMD_EMPTY:
4351 return ecore_queue_send_cmd_cmn(sc, params);
4353 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4358 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4359 struct ecore_queue_state_params *params)
4361 switch (params->cmd) {
4362 case ECORE_Q_CMD_SETUP:
4363 return ecore_q_send_setup_e2(sc, params);
4364 case ECORE_Q_CMD_INIT:
4365 case ECORE_Q_CMD_SETUP_TX_ONLY:
4366 case ECORE_Q_CMD_DEACTIVATE:
4367 case ECORE_Q_CMD_ACTIVATE:
4368 case ECORE_Q_CMD_UPDATE:
4369 case ECORE_Q_CMD_UPDATE_TPA:
4370 case ECORE_Q_CMD_HALT:
4371 case ECORE_Q_CMD_CFC_DEL:
4372 case ECORE_Q_CMD_TERMINATE:
4373 case ECORE_Q_CMD_EMPTY:
4374 return ecore_queue_send_cmd_cmn(sc, params);
4376 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
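/* The e1x and e2 dispatch tables above differ only in the SETUP handler
 * (ecore_q_send_setup_e1x vs. ecore_q_send_setup_e2); every other command
 * falls through to the common ecore_queue_send_cmd_cmn() dispatcher.
 */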
4382 * ecore_queue_chk_transition - check state machine of a regular Queue
4384 * @sc: device handle
4389 * It both checks if the requested command is legal in a current
4390 * state and, if it's legal, sets a `next_state' in the object
4391 * that will be used in the completion flow to set the `state'
4394 * returns 0 if a requested command is a legal transition,
4395 * ECORE_INVAL otherwise.
4397 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4398 struct ecore_queue_sp_obj *o,
4399 struct ecore_queue_state_params *params)
4401 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4402 enum ecore_queue_cmd cmd = params->cmd;
4403 struct ecore_queue_update_params *update_params =
4404 &params->params.update;
4405 uint8_t next_tx_only = o->num_tx_only;
4407 /* Forget all commands pending for completion if a driver-only state
4408 * transition has been requested.
4410 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4412 o->next_state = ECORE_Q_STATE_MAX;
4415 /* Don't allow a next state transition if we are in the middle of
4419 PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
4425 case ECORE_Q_STATE_RESET:
4426 if (cmd == ECORE_Q_CMD_INIT)
4427 next_state = ECORE_Q_STATE_INITIALIZED;
4430 case ECORE_Q_STATE_INITIALIZED:
4431 if (cmd == ECORE_Q_CMD_SETUP) {
4432 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4433 &params->params.setup.flags))
4434 next_state = ECORE_Q_STATE_ACTIVE;
4436 next_state = ECORE_Q_STATE_INACTIVE;
4440 case ECORE_Q_STATE_ACTIVE:
4441 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4442 next_state = ECORE_Q_STATE_INACTIVE;
4444 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4445 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4446 next_state = ECORE_Q_STATE_ACTIVE;
4448 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4449 next_state = ECORE_Q_STATE_MULTI_COS;
4453 else if (cmd == ECORE_Q_CMD_HALT)
4454 next_state = ECORE_Q_STATE_STOPPED;
4456 else if (cmd == ECORE_Q_CMD_UPDATE) {
4457 /* If "active" state change is requested, update the
4458 * state accordingly.
4460 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4461 &update_params->update_flags) &&
4462 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4463 &update_params->update_flags))
4464 next_state = ECORE_Q_STATE_INACTIVE;
4466 next_state = ECORE_Q_STATE_ACTIVE;
4470 case ECORE_Q_STATE_MULTI_COS:
4471 if (cmd == ECORE_Q_CMD_TERMINATE)
4472 next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4474 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4475 next_state = ECORE_Q_STATE_MULTI_COS;
4476 next_tx_only = o->num_tx_only + 1;
4479 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4480 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4481 next_state = ECORE_Q_STATE_MULTI_COS;
4483 else if (cmd == ECORE_Q_CMD_UPDATE) {
4484 /* If "active" state change is requested, update the
4485 * state accordingly.
4487 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4488 &update_params->update_flags) &&
4489 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4490 &update_params->update_flags))
4491 next_state = ECORE_Q_STATE_INACTIVE;
4493 next_state = ECORE_Q_STATE_MULTI_COS;
4497 case ECORE_Q_STATE_MCOS_TERMINATED:
4498 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4499 next_tx_only = o->num_tx_only - 1;
4500 if (next_tx_only == 0)
4501 next_state = ECORE_Q_STATE_ACTIVE;
4503 next_state = ECORE_Q_STATE_MULTI_COS;
4507 case ECORE_Q_STATE_INACTIVE:
4508 if (cmd == ECORE_Q_CMD_ACTIVATE)
4509 next_state = ECORE_Q_STATE_ACTIVE;
4511 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4512 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4513 next_state = ECORE_Q_STATE_INACTIVE;
4515 else if (cmd == ECORE_Q_CMD_HALT)
4516 next_state = ECORE_Q_STATE_STOPPED;
4518 else if (cmd == ECORE_Q_CMD_UPDATE) {
4519 /* If "active" state change is requested, update the
4520 * state accordingly.
4522 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4523 &update_params->update_flags) &&
4524 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4525 &update_params->update_flags)) {
4526 if (o->num_tx_only == 0)
4527 next_state = ECORE_Q_STATE_ACTIVE;
4528 else /* tx only queues exist for this queue */
4529 next_state = ECORE_Q_STATE_MULTI_COS;
4531 next_state = ECORE_Q_STATE_INACTIVE;
4535 case ECORE_Q_STATE_STOPPED:
4536 if (cmd == ECORE_Q_CMD_TERMINATE)
4537 next_state = ECORE_Q_STATE_TERMINATED;
4540 case ECORE_Q_STATE_TERMINATED:
4541 if (cmd == ECORE_Q_CMD_CFC_DEL)
4542 next_state = ECORE_Q_STATE_RESET;
4546 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4549 /* Transition is assured */
4550 if (next_state != ECORE_Q_STATE_MAX) {
4551 ECORE_MSG("Good state transition: %d(%d)->%d",
4552 state, cmd, next_state);
4553 o->next_state = next_state;
4554 o->next_tx_only = next_tx_only;
4555 return ECORE_SUCCESS;
4558 ECORE_MSG("Bad state transition request: %d %d", state, cmd);
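/* Summary of the regular-queue transitions encoded above:
 *
 *	RESET            --INIT-->          INITIALIZED
 *	INITIALIZED      --SETUP-->         ACTIVE or INACTIVE (Q_FLG_ACTIVE)
 *	ACTIVE           --DEACTIVATE-->    INACTIVE
 *	ACTIVE           --SETUP_TX_ONLY--> MULTI_COS
 *	ACTIVE/INACTIVE  --HALT-->          STOPPED
 *	INACTIVE         --ACTIVATE-->      ACTIVE
 *	MULTI_COS        --TERMINATE-->     MCOS_TERMINATED
 *	MCOS_TERMINATED  --CFC_DEL-->       ACTIVE (last Tx-only) or MULTI_COS
 *	STOPPED          --TERMINATE-->     TERMINATED
 *	TERMINATED       --CFC_DEL-->       RESET
 *
 * EMPTY and UPDATE_TPA always keep the current state; UPDATE keeps it too,
 * unless an "active" change is requested, in which case it moves the queue
 * between ACTIVE/MULTI_COS and INACTIVE.
 */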
4564 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4566 * @sc: device handle
4570 * It both checks if the requested command is legal in a current
4571 * state and, if it's legal, sets a `next_state' in the object
4572 * that will be used in the completion flow to set the `state'
4575 * returns 0 if a requested command is a legal transition,
4576 * ECORE_INVAL otherwise.
4578 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4579 struct ecore_queue_sp_obj *o,
4580 struct ecore_queue_state_params
4583 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4584 enum ecore_queue_cmd cmd = params->cmd;
4587 case ECORE_Q_STATE_RESET:
4588 if (cmd == ECORE_Q_CMD_INIT)
4589 next_state = ECORE_Q_STATE_INITIALIZED;
4592 case ECORE_Q_STATE_INITIALIZED:
4593 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4594 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4595 &params->params.tx_only.flags))
4596 next_state = ECORE_Q_STATE_ACTIVE;
4598 next_state = ECORE_Q_STATE_INACTIVE;
4602 case ECORE_Q_STATE_ACTIVE:
4603 case ECORE_Q_STATE_INACTIVE:
4604 if (cmd == ECORE_Q_CMD_CFC_DEL)
4605 next_state = ECORE_Q_STATE_RESET;
4609 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4612 /* Transition is assured */
4613 if (next_state != ECORE_Q_STATE_MAX) {
4614 ECORE_MSG("Good state transition: %d(%d)->%d",
4615 state, cmd, next_state);
4616 o->next_state = next_state;
4617 return ECORE_SUCCESS;
4620 ECORE_MSG("Bad state transition request: %d %d", state, cmd);
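/* The forwarding queue uses a reduced machine: RESET --INIT--> INITIALIZED,
 * INITIALIZED --SETUP_TX_ONLY--> ACTIVE or INACTIVE (per Q_FLG_ACTIVE), and
 * either of those --CFC_DEL--> RESET.
 */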
4624 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4625 struct ecore_queue_sp_obj *obj,
4626 uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4627 uint8_t func_id, void *rdata,
4628 ecore_dma_addr_t rdata_mapping, unsigned long type)
4630 ECORE_MEMSET(obj, 0, sizeof(*obj));
4632 /* We support at most ECORE_MULTI_TX_COS Tx CoS classes at the moment */
4633 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4635 rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4636 obj->max_cos = cid_cnt;
4638 obj->func_id = func_id;
4640 obj->rdata_mapping = rdata_mapping;
4642 obj->next_state = ECORE_Q_STATE_MAX;
4644 if (CHIP_IS_E1x(sc))
4645 obj->send_cmd = ecore_queue_send_cmd_e1x;
4647 obj->send_cmd = ecore_queue_send_cmd_e2;
4649 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4650 obj->check_transition = ecore_queue_chk_fwd_transition;
4652 obj->check_transition = ecore_queue_chk_transition;
4654 obj->complete_cmd = ecore_queue_comp_cmd;
4655 obj->wait_comp = ecore_queue_wait_comp;
4656 obj->set_pending = ecore_queue_set_pending;
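/* Initialization sketch (illustrative only): cl_id, the cids[] array, the
 * ramrod buffer and its DMA mapping, and the ECORE_Q_TYPE_* bit mask in
 * q_type all come from the caller's own queue bookkeeping.
 *
 *	uint32_t cids[ECORE_MULTI_TX_COS] = { first_cid };
 *
 *	ecore_init_queue_obj(sc, &q_obj, cl_id, cids, 1, func_id,
 *			     rdata, rdata_mapping, q_type);
 */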
4659 /********************** Function state object *********************************/
4660 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4661 struct ecore_func_sp_obj *o)
4663 /* in the middle of a transaction - return INVALID state */
4665 return ECORE_F_STATE_MAX;
4667 /* ensure the ordering of reading o->pending and o->state:
4668 * o->pending should be read first
4675 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4676 struct ecore_func_sp_obj *o,
4677 enum ecore_func_cmd cmd)
4679 return ecore_state_wait(sc, cmd, &o->pending);
4683 * ecore_func_state_change_comp - complete the state machine transition
4685 * @sc: device handle
4689 * Called on state change transition. Completes the state
4690 * machine transition only - no HW interaction.
4693 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4694 struct ecore_func_sp_obj *o,
4695 enum ecore_func_cmd cmd)
4697 unsigned long cur_pending = o->pending;
4699 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4701 "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4702 cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4707 ECORE_MSG("Completing command %d for func %d, setting state to %d",
4708 cmd, ECORE_FUNC_ID(sc), o->next_state);
4710 o->state = o->next_state;
4711 o->next_state = ECORE_F_STATE_MAX;
4713 /* It's important that o->state and o->next_state are
4714 * updated before o->pending.
4718 ECORE_CLEAR_BIT(cmd, &o->pending);
4719 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4721 return ECORE_SUCCESS;
4725 * ecore_func_comp_cmd - complete the state change command
4727 * @sc: device handle
4731 * Checks that the arrived completion is expected.
4733 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4734 struct ecore_func_sp_obj *o,
4735 enum ecore_func_cmd cmd)
4737 /* Complete the state machine part first, check if it's a
4740 int rc = ecore_func_state_change_comp(sc, o, cmd);
4745 * ecore_func_chk_transition - perform function state machine transition
4747 * @sc: device handle
4751 * It both checks if the requested command is legal in a current
4752 * state and, if it's legal, sets a `next_state' in the object
4753 * that will be used in the completion flow to set the `state'
4756 * returns 0 if a requested command is a legal transition,
4757 * ECORE_INVAL otherwise.
4759 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4760 struct ecore_func_sp_obj *o,
4761 struct ecore_func_state_params *params)
4763 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4764 enum ecore_func_cmd cmd = params->cmd;
4766 /* Forget all commands pending for completion if a driver-only state
4767 * transition has been requested.
4769 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4771 o->next_state = ECORE_F_STATE_MAX;
4774 /* Don't allow a next state transition if we are in the middle of
4781 case ECORE_F_STATE_RESET:
4782 if (cmd == ECORE_F_CMD_HW_INIT)
4783 next_state = ECORE_F_STATE_INITIALIZED;
4786 case ECORE_F_STATE_INITIALIZED:
4787 if (cmd == ECORE_F_CMD_START)
4788 next_state = ECORE_F_STATE_STARTED;
4790 else if (cmd == ECORE_F_CMD_HW_RESET)
4791 next_state = ECORE_F_STATE_RESET;
4794 case ECORE_F_STATE_STARTED:
4795 if (cmd == ECORE_F_CMD_STOP)
4796 next_state = ECORE_F_STATE_INITIALIZED;
4797 /* afex ramrods can be sent only in the started state, and only
4798 * if a function_stop ramrod completion is not pending;
4799 * for these events the next state remains STARTED.
4801 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4802 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4803 next_state = ECORE_F_STATE_STARTED;
4805 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4806 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4807 next_state = ECORE_F_STATE_STARTED;
4809 /* Switch_update ramrod can be sent in either started or
4810 * tx_stopped state, and it doesn't change the state.
4812 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4813 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4814 next_state = ECORE_F_STATE_STARTED;
4816 else if (cmd == ECORE_F_CMD_TX_STOP)
4817 next_state = ECORE_F_STATE_TX_STOPPED;
4820 case ECORE_F_STATE_TX_STOPPED:
4821 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4822 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4823 next_state = ECORE_F_STATE_TX_STOPPED;
4825 else if (cmd == ECORE_F_CMD_TX_START)
4826 next_state = ECORE_F_STATE_STARTED;
4830 PMD_DRV_LOG(ERR, "Unknown state: %d", state);
4833 /* Transition is assured */
4834 if (next_state != ECORE_F_STATE_MAX) {
4835 ECORE_MSG("Good function state transition: %d(%d)->%d",
4836 state, cmd, next_state);
4837 o->next_state = next_state;
4838 return ECORE_SUCCESS;
4841 ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
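/* Summary of the function transitions encoded above:
 *
 *	RESET        --HW_INIT-->   INITIALIZED
 *	INITIALIZED  --START-->     STARTED
 *	INITIALIZED  --HW_RESET-->  RESET
 *	STARTED      --STOP-->      INITIALIZED
 *	STARTED      --TX_STOP-->   TX_STOPPED
 *	TX_STOPPED   --TX_START-->  STARTED
 *
 * AFEX_UPDATE and AFEX_VIFLISTS are accepted only in STARTED, SWITCH_UPDATE
 * in STARTED or TX_STOPPED; all three require that no STOP is pending and
 * leave the state unchanged.
 */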
4847 * ecore_func_init_func - performs HW init at function stage
4849 * @sc: device handle
4852 * Init HW when the current phase is
4853 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4856 static int ecore_func_init_func(struct bnx2x_softc *sc,
4857 const struct ecore_func_sp_drv_ops *drv)
4859 return drv->init_hw_func(sc);
4863 * ecore_func_init_port - performs HW init at port stage
4865 * @sc: device handle
4868 * Init HW when the current phase is
4869 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4870 * FUNCTION-only HW blocks.
4873 static int ecore_func_init_port(struct bnx2x_softc *sc,
4874 const struct ecore_func_sp_drv_ops *drv)
4876 int rc = drv->init_hw_port(sc);
4880 return ecore_func_init_func(sc, drv);
4884 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4886 * @sc: device handle
4889 * Init HW when the current phase is
4890 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4891 * PORT-only and FUNCTION-only HW blocks.
4893 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4896 int rc = drv->init_hw_cmn_chip(sc);
4900 return ecore_func_init_port(sc, drv);
4904 * ecore_func_init_cmn - performs HW init at common stage
4906 * @sc: device handle
4909 * Init HW when the current phase is
4910 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4911 * PORT-only and FUNCTION-only HW blocks.
4913 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4914 const struct ecore_func_sp_drv_ops *drv)
4916 int rc = drv->init_hw_cmn(sc);
4920 return ecore_func_init_port(sc, drv);
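/* The three init helpers above nest: a COMMON(_CHIP) init also runs the
 * PORT stage, and a PORT init also runs the FUNCTION stage, matching the
 * FW_MSG_CODE_DRV_LOAD_* phases dispatched in ecore_func_hw_init() below.
 */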
4923 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4924 struct ecore_func_state_params *params)
4926 uint32_t load_code = params->params.hw_init.load_phase;
4927 struct ecore_func_sp_obj *o = params->f_obj;
4928 const struct ecore_func_sp_drv_ops *drv = o->drv;
4931 ECORE_MSG("function %d load_code %x",
4932 ECORE_ABS_FUNC_ID(sc), load_code);
4935 rc = drv->init_fw(sc);
4937 PMD_DRV_LOG(ERR, "Error loading firmware");
4941 /* Handle the beginning of COMMON_XXX phases separately... */
4942 switch (load_code) {
4943 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4944 rc = ecore_func_init_cmn_chip(sc, drv);
4949 case FW_MSG_CODE_DRV_LOAD_COMMON:
4950 rc = ecore_func_init_cmn(sc, drv);
4955 case FW_MSG_CODE_DRV_LOAD_PORT:
4956 rc = ecore_func_init_port(sc, drv);
4961 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4962 rc = ecore_func_init_func(sc, drv);
4968 PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
4974 /* In case of success, complete the command immediately: no ramrods
4978 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4984 * ecore_func_reset_func - reset HW at function stage
4986 * @sc: device handle
4989 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4990 * FUNCTION-only HW blocks.
4992 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4995 drv->reset_hw_func(sc);
4999 * ecore_func_reset_port - reset HW at port stage
5001 * @sc: device handle
5004 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5005 * FUNCTION-only and PORT-only HW blocks.
5009 * It's important to call reset_port before reset_func(): the last thing
5010 * reset_func() does is pf_disable(), which disables PGLUE_B and thus
5011 * makes any further DMAE transactions impossible.
5013 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5016 drv->reset_hw_port(sc);
5017 ecore_func_reset_func(sc, drv);
5021 * ecore_func_reset_cmn - reset HW at common stage
5023 * @sc: device handle
5026 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5027 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5028 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5030 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5031 const struct ecore_func_sp_drv_ops *drv)
5033 ecore_func_reset_port(sc, drv);
5034 drv->reset_hw_cmn(sc);
5037 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5038 struct ecore_func_state_params *params)
5040 uint32_t reset_phase = params->params.hw_reset.reset_phase;
5041 struct ecore_func_sp_obj *o = params->f_obj;
5042 const struct ecore_func_sp_drv_ops *drv = o->drv;
5044 ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5047 switch (reset_phase) {
5048 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5049 ecore_func_reset_cmn(sc, drv);
5051 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5052 ecore_func_reset_port(sc, drv);
5054 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5055 ecore_func_reset_func(sc, drv);
5058 PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
5063 /* Complete the command immediately: no ramrods have been sent. */
5064 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5066 return ECORE_SUCCESS;
5069 static int ecore_func_send_start(struct bnx2x_softc *sc,
5070 struct ecore_func_state_params *params)
5072 struct ecore_func_sp_obj *o = params->f_obj;
5073 struct function_start_data *rdata =
5074 (struct function_start_data *)o->rdata;
5075 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5076 struct ecore_func_start_params *start_params = &params->params.start;
5078 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5080 /* Fill the ramrod data with provided parameters */
5081 rdata->function_mode = (uint8_t) start_params->mf_mode;
5082 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5083 rdata->path_id = ECORE_PATH_ID(sc);
5084 rdata->network_cos_mode = start_params->network_cos_mode;
5085 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5086 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5089 * No explicit memory barrier is needed here: the write to the SPQ
5090 * element only has to be ordered against the SPQ producer update,
5091 * which involves a memory read, and the full memory barrier
5092 * required for that is already issued inside ecore_sp_post().
5093 */
5096 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5097 data_mapping, NONE_CONNECTION_TYPE);
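/* Usage sketch (illustrative only): the fields copied into the ramrod above
 * come from params->params.start, filled by the caller before invoking the
 * state machine (see the sketch after ecore_func_state_change() below).
 * sc->func_obj, mf_mode, ovlan and cos_mode are placeholder names here.
 *
 *	struct ecore_func_state_params fp = { 0 };
 *
 *	fp.f_obj = &sc->func_obj;
 *	fp.cmd = ECORE_F_CMD_START;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &fp.ramrod_flags);
 *	fp.params.start.mf_mode = mf_mode;
 *	fp.params.start.sd_vlan_tag = ovlan;
 *	fp.params.start.network_cos_mode = cos_mode;
 */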
5100 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5103 struct ecore_func_sp_obj *o = params->f_obj;
5104 struct function_update_data *rdata =
5105 (struct function_update_data *)o->rdata;
5106 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5107 struct ecore_func_switch_update_params *switch_update_params =
5108 &params->params.switch_update;
5110 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5112 /* Fill the ramrod data with provided parameters */
5113 rdata->tx_switch_suspend_change_flg = 1;
5114 rdata->tx_switch_suspend = switch_update_params->suspend;
5115 rdata->echo = SWITCH_UPDATE;
5117 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5118 data_mapping, NONE_CONNECTION_TYPE);
5121 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5124 struct ecore_func_sp_obj *o = params->f_obj;
5125 struct function_update_data *rdata =
5126 (struct function_update_data *)o->afex_rdata;
5127 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5128 struct ecore_func_afex_update_params *afex_update_params =
5129 &params->params.afex_update;
5131 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5133 /* Fill the ramrod data with provided parameters */
5134 rdata->vif_id_change_flg = 1;
5135 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5136 rdata->afex_default_vlan_change_flg = 1;
5137 rdata->afex_default_vlan =
5138 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5139 rdata->allowed_priorities_change_flg = 1;
5140 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5141 rdata->echo = AFEX_UPDATE;
5143 /* No explicit memory barrier is needed here: the write to the SPQ
5144 * element only has to be ordered against the SPQ producer update,
5145 * which involves a memory read, and the full memory barrier
5146 * required for that is already issued inside ecore_sp_post().
5147 */
5149 ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5151 rdata->afex_default_vlan, rdata->allowed_priorities);
5153 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5154 data_mapping, NONE_CONNECTION_TYPE);
5158 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5159 struct ecore_func_state_params *params)
5161 struct ecore_func_sp_obj *o = params->f_obj;
5162 struct afex_vif_list_ramrod_data *rdata =
5163 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5164 struct ecore_func_afex_viflists_params *afex_vif_params =
5165 &params->params.afex_viflists;
5166 uint64_t *p_rdata = (uint64_t *) rdata;
5168 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5170 /* Fill the ramrod data with provided parameters */
5171 rdata->vif_list_index =
5172 ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5173 rdata->func_bit_map = afex_vif_params->func_bit_map;
5174 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5175 rdata->func_to_clear = afex_vif_params->func_to_clear;
5177 /* send the sub-command type in the echo field */
5178 rdata->echo = afex_vif_params->afex_vif_list_command;
5180 /* No explicit memory barrier is needed here: the write to the SPQ
5181 * element only has to be ordered against the SPQ producer update,
5182 * which involves a memory read, and the full memory barrier
5183 * required for that is already issued inside ecore_sp_post().
5184 */
5188 ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5189 rdata->afex_vif_list_command, rdata->vif_list_index,
5190 rdata->func_bit_map, rdata->func_to_clear);
5192 /* this ramrod sends data directly and not through DMA mapping */
5193 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5194 *p_rdata, NONE_CONNECTION_TYPE);
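/* Unlike the other senders in this file, the VIF-list ramrod carries its
 * payload inline: the 64-bit contents of the small ramrod structure are
 * passed to ecore_sp_post() in place of the usual DMA address.
 */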
5197 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
5198 ecore_func_state_params *params)
5200 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5201 NONE_CONNECTION_TYPE);
5204 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
5205 ecore_func_state_params *params)
5207 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5208 NONE_CONNECTION_TYPE);
5211 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
5214 struct ecore_func_sp_obj *o = params->f_obj;
5215 struct flow_control_configuration *rdata =
5216 (struct flow_control_configuration *)o->rdata;
5217 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5218 struct ecore_func_tx_start_params *tx_start_params =
5219 &params->params.tx_start;
5222 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5224 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5225 rdata->dcb_version = tx_start_params->dcb_version;
5226 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5228 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5229 rdata->traffic_type_to_priority_cos[i] =
5230 tx_start_params->traffic_type_to_priority_cos[i];
5232 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5233 data_mapping, NONE_CONNECTION_TYPE);
5236 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5237 struct ecore_func_state_params *params)
5239 switch (params->cmd) {
5240 case ECORE_F_CMD_HW_INIT:
5241 return ecore_func_hw_init(sc, params);
5242 case ECORE_F_CMD_START:
5243 return ecore_func_send_start(sc, params);
5244 case ECORE_F_CMD_STOP:
5245 return ecore_func_send_stop(sc, params);
5246 case ECORE_F_CMD_HW_RESET:
5247 return ecore_func_hw_reset(sc, params);
5248 case ECORE_F_CMD_AFEX_UPDATE:
5249 return ecore_func_send_afex_update(sc, params);
5250 case ECORE_F_CMD_AFEX_VIFLISTS:
5251 return ecore_func_send_afex_viflists(sc, params);
5252 case ECORE_F_CMD_TX_STOP:
5253 return ecore_func_send_tx_stop(sc, params);
5254 case ECORE_F_CMD_TX_START:
5255 return ecore_func_send_tx_start(sc, params);
5256 case ECORE_F_CMD_SWITCH_UPDATE:
5257 return ecore_func_send_switch_update(sc, params);
5259 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
5264 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5265 struct ecore_func_sp_obj *obj,
5266 void *rdata, ecore_dma_addr_t rdata_mapping,
5267 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5268 struct ecore_func_sp_drv_ops *drv_iface)
5270 ECORE_MEMSET(obj, 0, sizeof(*obj));
5272 ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5275 obj->rdata_mapping = rdata_mapping;
5276 obj->afex_rdata = afex_rdata;
5277 obj->afex_rdata_mapping = afex_rdata_mapping;
5278 obj->send_cmd = ecore_func_send_cmd;
5279 obj->check_transition = ecore_func_chk_transition;
5280 obj->complete_cmd = ecore_func_comp_cmd;
5281 obj->wait_comp = ecore_func_wait_comp;
5282 obj->drv = drv_iface;
5286 * ecore_func_state_change - perform Function state change transition
5288 * @sc: device handle
5289 * @params: parameters to perform the transaction
5291 * returns 0 in case of successfully completed transition,
5292 * negative error code in case of failure, positive
5293 * (EBUSY) value if there is a completion that is
5294 * still pending (possible only if RAMROD_COMP_WAIT is
5295 * not set in params->ramrod_flags for asynchronous
5298 int ecore_func_state_change(struct bnx2x_softc *sc,
5299 struct ecore_func_state_params *params)
5301 struct ecore_func_sp_obj *o = params->f_obj;
5303 enum ecore_func_cmd cmd = params->cmd;
5304 unsigned long *pending = &o->pending;
5306 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5308 /* Check that the requested transition is legal */
5309 rc = o->check_transition(sc, o, params);
5310 if ((rc == ECORE_BUSY) &&
5311 (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5312 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5313 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5315 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5316 rc = o->check_transition(sc, o, params);
5318 if (rc == ECORE_BUSY) {
5319 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5321 "timeout waiting for previous ramrod completion");
5325 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5329 /* Set "pending" bit */
5330 ECORE_SET_BIT(cmd, pending);
5332 /* Don't send a command if only driver cleanup was requested */
5333 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5334 ecore_func_state_change_comp(sc, o, cmd);
5335 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5338 rc = o->send_cmd(sc, params);
5340 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5343 o->next_state = ECORE_F_STATE_MAX;
5344 ECORE_CLEAR_BIT(cmd, pending);
5345 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5349 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5350 rc = o->wait_comp(sc, o, cmd);
5354 return ECORE_SUCCESS;
5358 return ECORE_RET_PENDING(cmd, pending);
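/* Usage sketch (illustrative only), a synchronous HW_INIT request; load_code
 * is the MCP load-response phase and sc->func_obj is an assumed field name
 * for the driver's function object (set up via ecore_init_func_obj()).
 *
 *	struct ecore_func_state_params fp = { 0 };
 *
 *	fp.f_obj = &sc->func_obj;
 *	fp.cmd = ECORE_F_CMD_HW_INIT;
 *	fp.params.hw_init.load_phase = load_code;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &fp.ramrod_flags);
 *	rc = ecore_func_state_change(sc, &fp);
 */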
5361 /******************************************************************************
5363 * Calculates CRC-8 on a 32-bit word value: polynomial 0-1-2-8
5364 * (x^8 + x^2 + x + 1). Code was translated from Verilog.
5366 *****************************************************************************/
5367 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5375 /* split the data into 32 bits */
5376 for (i = 0; i < 32; i++) {
5377 D[i] = (uint8_t) (data & 1);
5381 /* split the crc into 8 bits */
5382 for (i = 0; i < 8; i++) {
5387 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5388 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5390 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5391 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5392 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5393 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5394 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5395 C[0] ^ C[1] ^ C[4] ^ C[5];
5396 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5397 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5398 C[1] ^ C[2] ^ C[5] ^ C[6];
5399 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5400 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5401 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5402 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5403 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5405 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5406 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5407 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5408 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5411 for (i = 0; i < 8; i++) {
5412 crc_res |= (NewCRC[i] << i);
5419 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5424 for (i = 0; i < 8; i++)
5425 crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);