/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc: driver handle
 * @o: pointer to the object
 * @exe_len: length of the command chunk executed in one go
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
		     struct ecore_exe_queue_obj *o,
		     int exe_len,
		     union ecore_qable_obj *owner,
		     exe_q_validate validate,
		     exe_q_remove remove,
		     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
		  exe_len);
}

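/* Illustrative sketch (editor's note, mirroring what ecore_init_mac_obj()
 * does later in this file): the owner wires its callbacks into the queue and
 * picks a chunk length matching how many rules fit into one ramrod:
 *
 *	ecore_exe_queue_init(sc, &mac_obj->exe_queue,
 *			     CLASSIFY_RULES_COUNT,
 *			     qable_obj,
 *			     ecore_validate_vlan_mac,
 *			     ecore_remove_vlan_mac,
 *			     ecore_optimize_vlan_mac,
 *			     ecore_execute_vlan_mac,
 *			     ecore_exeq_get_mac);
 */
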
static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
				      struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem) cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc: driver handle
 * @o: queue
 * @elem: new command to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
			       struct ecore_exe_queue_obj *o,
			       struct ecore_exeq_elem *elem, int restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element against an opposite pending
		 * command (optimize it away).
		 */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
					    struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem, link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(sc, elem);
	}
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
						 struct ecore_exe_queue_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->lock);

	__ecore_exe_queue_reset_pending(sc, o);

	ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
				struct ecore_exe_queue_obj *o,
				unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * pending list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc,
				  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem, link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}

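/* Worked example (editor's note): with exe_chunk_len == 3, a queue of
 * [ADD(len 1), MOVE(len 2), ADD(len 1)] produces a first chunk of ADD+MOVE
 * (cur_len == 3); the trailing ADD stays queued until the completion for the
 * current chunk arrives. The 'spacer' element is pushed onto pending_comp
 * before an element leaves exe_queue so that at no instant are both lists
 * empty, which is what keeps the lockless ecore_exe_queue_empty() honest.
 */
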
static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct bnx2x_softc
							  *sc)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/*
	 * !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of int definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */
	return !!ECORE_TEST_BIT(o->state, o->pstate);
}

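/* Editor's sketch of the truncation hazard the double negation avoids,
 * assuming (for illustration) a 64-bit value behind ECORE_TEST_BIT:
 *
 *	uint64_t v = 1ULL << 40;
 *	int truncated  = (int)v;	// 0 - the interesting bit is lost
 *	int normalized = !!v;		// 1 - any non-zero value maps to 1
 */
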
static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
			    unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		bnx2x_intr_legacy(sc, 1);
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

		ECORE_WAIT(sc, delay_us);
	}

	/* timeout! */
	PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details: Non-blocking implementation; should be called under execution
 *           queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
		return ECORE_BUSY;
	}

	ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
 * which wasn't able to run due to a taken lock on vlan mac head list.
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		PMD_DRV_LOG(ERR, sc,
			    "execution of pending commands failed with rc %d",
			    rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
 * called due to vlan mac head list lock being taken.
 *
 * @sc: device handle
 * @o: vlan_mac object
 * @ramrod_flags: ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(sc,
			  "vlan_mac_lock - writer release encountered a pending request");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc,
		  "vlan_mac_lock - locked reader - number %d", o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
				      struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader - possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
					   struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		PMD_DRV_LOG(ERR, sc,
			    "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc: device handle
 * @o: vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

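/* Typical reader-side pattern (editor's sketch; ecore_get_n_elements() and
 * ecore_vlan_mac_del_all() below follow it): take the reader lock, walk
 * o->head, release. The last reader out kicks any execution step that was
 * pended while the list was locked:
 *
 *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *		... iterate o->head ...
 *		ecore_vlan_mac_h_read_unlock(sc, o);
 *	}
 */
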
/**
 * ecore_get_n_elements - get n elements from the vlan_mac registry
 *
 * @sc: device handle
 * @o: vlan_mac object
 * @n: number of elements to get
 * @base: base address for element placement
 * @stride: stride between elements (in bytes)
 * @size: size of a single element
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
				struct ecore_vlan_mac_obj *o, int n,
				uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0, read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "get_n_elements failed to get vlan mac reader lock; Access without lock");

	/* traverse the registry */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG(sc,
				  "copied element number %d to address %p element was:",
				  counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
		    struct ecore_vlan_mac_obj *o,
		    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
			    struct ecore_vlan_mac_obj *src_o,
			    struct ecore_vlan_mac_obj *dst_o,
			    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(sc, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(sc, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
				       __rte_unused struct ecore_vlan_mac_obj *src_o,
				       __rte_unused struct ecore_vlan_mac_obj *dst_o,
				       __rte_unused union ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	uint8_t rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
				 int add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
	    NIG_REG_LLH0_FUNC_MEM;

	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8 * index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}

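/* Worked example (editor's note): for dev_addr 00:11:22:33:44:55 the two
 * halves of the 64-bit write-back LLH entry are packed as
 *
 *	wb_data[0] = 0x22334455;	// bytes 2..5 of the MAC
 *	wb_data[1] = 0x00000011;	// bytes 0..1 of the MAC
 *
 * i.e. the MAC occupies the low 48 bits of the LLH CAM line.
 */
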
/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @o: queue for which we want to configure this rule
 * @add: if TRUE the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
					  int add, int opcode,
					  struct eth_classify_cmd_header *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
	    (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: ECORE_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt: number of rules in this ramrod
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
					    struct eth_classify_header *hdr,
					    int rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (uint8_t)rule_cnt;
}

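/* Editor's sketch of the echo encoding: the completion path can recover both
 * fields from the echo it gets back. Assuming the usual mask/shift
 * definitions from ecore_sp.h (the decode below is hypothetical):
 *
 *	uint32_t echo = hdr->echo;	// after LE-to-CPU conversion
 *	uint32_t cid  = echo & ECORE_SWCID_MASK;
 *	int      type = echo >> ECORE_SWCID_SHIFT;
 */
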
/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 __rte_unused int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
	    (struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
					      vlan_mac.target_obj, TRUE,
					      CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
		    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o: ecore_vlan_mac_obj
 * @type: the type of the command
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
					     int type, int cam_offset,
					     struct mac_configuration_hdr *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (uint8_t)cam_offset;
	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
					     int add, int opcode,
					     uint8_t *mac,
					     uint16_t vlan_id,
					     struct mac_configuration_entry
					     *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	uint32_t cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
	}
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
					 struct ecore_vlan_mac_obj *o,
					 int type, int cam_offset,
					 int add, uint8_t *mac,
					 uint16_t vlan_id, int opcode,
					 struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];

	ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem,
				  __rte_unused int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
	    (struct mac_configuration_cmd *)(raw->rdata);
	/* The 57711 does not support the MOVE command,
	 * so it's either ADD or DEL
	 */
	int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
	    TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
 * taken into account.
 *
 * The cookie should be given back in the next call to make the function handle
 * the next element. If *ppos is set to NULL it will restart the iterator.
 * If returned *ppos == NULL this means that the last element has been handled.
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_ramrod_params *p,
				  struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					       struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}

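/* Editor's sketch of the cookie-driven iteration described above (the real
 * caller lives outside this section):
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = o->restore(sc, &p, &pos);	// re-adds one element
 *		if (rc < 0)
 *			break;			// current operation failed
 *	} while (pos);	// *ppos comes back NULL after the last element
 */
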
/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
						  struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
				  sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc: device handle
 * @qo: ecore_qable_obj
 * @elem: ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(sc,
			  "ADD command is not allowed considering current registry state.");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending ADD command already");
		return ECORE_EXISTS;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc: device handle
 * @qo: quable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return ECORE_EXISTS.
	 */
	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(sc,
			  "DEL command is not allowed considering current registry state");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending DEL command already");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc: device handle
 * @qo: quable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
					union ecore_qable_obj *qo,
					struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc,
			  "MOVE command is not allowed considering current registry state");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending DEL command on the source queue already");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending ADD command on the destination queue already");
		return ECORE_INVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
				 union ecore_qable_obj *qo,
				 struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* The calling function should not differentiate between this
		 * case and the case in which there is already a pending ramrod.
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @cqe: completion element
 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
 *
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}

/**
 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @sc: device handle
 * @qo: ecore_qable_obj
 * @elem: ecore_exeq_elem
 */
static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem query, *pos;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;

	ECORE_MEMCPY(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
		break;
	case ECORE_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
				    &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
				PMD_DRV_LOG(ERR, sc,
					    "Failed to return the credit for the optimized ADD command");
				return ECORE_INVAL;
			} else if (!o->get_credit(o)) {	/* VLAN_MAC_DEL */
				PMD_DRV_LOG(ERR, sc,
					    "Failed to recover the credit from the optimized DEL command");
				return ECORE_INVAL;
			}
		}

		ECORE_MSG(sc, "Optimizing %s command",
			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
			  "ADD" : "DEL");

		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
		ecore_exe_queue_free_elem(sc, pos);
		return 1;
	}

	return 0;
}

/**
 * ecore_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @sc: device handle
 * @o: vlan_mac object
 * @elem: execution queue element
 * @restore: if TRUE, this is a RESTORE flow
 * @re: the resulting registry element is returned here
 *
 * prepare a registry element according to the current command request.
 */
static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o,
					    struct ecore_exeq_elem *elem,
					    int restore,
					    struct ecore_vlan_mac_registry_elem
					    **re)
{
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	struct ecore_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
		if (!reg_elem)
			return ECORE_NOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			ECORE_DBG_BREAK_IF(1);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			return ECORE_INVAL;
		}

		ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			     sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
		    elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else {		/* DEL, RESTORE */
		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	}

	*re = reg_elem;
	return ECORE_SUCCESS;
}

/**
 * ecore_execute_vlan_mac - execute vlan mac command
 *
 * @sc: device handle
 * @qo: qable object
 * @exe_chunk: chunk of commands to execute
 * @ramrod_flags: flags
 *
 * go and send a ramrod!
 */
static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
				  union ecore_qable_obj *qo,
				  ecore_list_t *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc, idx = 0;
	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
	int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct ecore_vlan_mac_registry_elem *reg_elem;
	enum ecore_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
					  struct ecore_exeq_elem) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			ECORE_DBG_BREAK_IF(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == ECORE_VLAN_MAC_ADD) ||
			     (cmd == ECORE_VLAN_MAC_MOVE)))
				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
						     &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer which involves a
		 * memory read and we will have to put a full memory barrier
		 * there (inside ecore_sp_post()).
		 */
		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
				   r->rdata_mapping, ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(sc, o,
						&elem->cmd_data.vlan_mac.u);

			ECORE_DBG_BREAK_IF(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
		}
	}

	if (!drv_only)
		return ECORE_PENDING;
	else
		return ECORE_SUCCESS;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == ECORE_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == ECORE_VLAN_MAC_ADD) ||
		     (cmd == ECORE_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(sc, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
							&cam_obj->head);
				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			}
		}
	}

	return rc;
}

static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc,
				       struct ecore_vlan_mac_ramrod_params *p)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = ecore_exe_queue_alloc_elem(sc);
	if (!elem)
		return ECORE_NOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case ECORE_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
		     sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
}

/**
 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @sc: device handle
 * @p: command parameters
 */
int ecore_config_vlan_mac(struct bnx2x_softc *sc,
			  struct ecore_vlan_mac_ramrod_params *p)
{
	int rc = ECORE_SUCCESS;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
	struct ecore_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(sc, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		ECORE_MSG(sc,
			  "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(sc, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(sc,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}

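/* Editor's sketch of a typical synchronous caller (object placement and the
 * MAC source are illustrative, not taken from this file): add one unicast MAC
 * and block until the ramrod chain completes:
 *
 *	struct ecore_vlan_mac_ramrod_params p = { 0 };
 *
 *	p.vlan_mac_obj = &sc->sp_objs->mac_obj;	// assumed object placement
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(p.user_req.u.mac.mac, mac_addr, ETH_ALEN);
 *	ECORE_SET_BIT(ECORE_ETH_MAC, &p.user_req.vlan_mac_flags);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &p);
 */
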
/**
 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @sc: device handle
 * @o: vlan object
 * @vlan_mac_flags: vlan flags of the elements to delete
 * @ramrod_flags: execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;
	int rc = 0, read_lock;
	struct ecore_vlan_mac_ramrod_params p;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	ECORE_SPIN_LOCK_BH(&exeq->lock);

	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
				       &exeq->exe_queue, link,
				       struct ecore_exeq_elem) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(sc, exeq->owner, exeq_pos);
			if (rc) {
				PMD_DRV_LOG(ERR, sc, "Failed to remove command");
				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
				return rc;
			}
			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
						&exeq->exe_queue);
			ecore_exe_queue_free_elem(sc, exeq_pos);
		}
	}

	ECORE_SPIN_UNLOCK_BH(&exeq->lock);

	/* Prepare a command request */
	ECORE_MEMSET(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = ECORE_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		return read_lock;

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = ecore_config_vlan_mac(sc, &p);
			if (rc < 0) {
				PMD_DRV_LOG(ERR, sc,
					    "Failed to add a new DEL command");
				ecore_vlan_mac_h_read_unlock(sc, o);
				return rc;
			}
		}
	}

	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
	ecore_vlan_mac_h_read_unlock(sc, o);

	p.ramrod_flags = *ramrod_flags;
	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	return ecore_config_vlan_mac(sc, &p);
}

static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
			       uint32_t cid, uint8_t func_id,
			       void *rdata,
			       ecore_dma_addr_t rdata_mapping, int state,
			       unsigned long *pstate, ecore_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = ecore_raw_check_pending;
	raw->clear_pending = ecore_raw_clear_pending;
	raw->set_pending = ecore_raw_set_pending;
	raw->wait_comp = ecore_raw_wait;
}

static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
				       uint8_t cl_id, uint32_t cid,
				       uint8_t func_id, void *rdata,
				       ecore_dma_addr_t rdata_mapping,
				       int state, unsigned long *pstate,
				       ecore_obj_type type,
				       struct ecore_credit_pool_obj *macs_pool,
				       struct ecore_credit_pool_obj *vlans_pool)
{
	ECORE_LIST_INIT(&o->head);
	o->head_reader = 0;
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = ecore_vlan_mac_del_all;
	o->restore = ecore_vlan_mac_restore;
	o->complete = ecore_complete_vlan_mac;
	o->wait = ecore_wait_vlan_mac;

	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}

void ecore_init_mac_obj(struct bnx2x_softc *sc,
			struct ecore_vlan_mac_obj *mac_obj,
			uint8_t cl_id, uint32_t cid, uint8_t func_id,
			void *rdata, ecore_dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, ecore_obj_type type,
			struct ecore_credit_pool_obj *macs_pool)
{
	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;

	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = ecore_get_credit_mac;
	mac_obj->put_credit = ecore_put_credit_mac;
	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;

	if (CHIP_IS_E1x(sc)) {
		mac_obj->set_one_rule = ecore_set_one_mac_e1x;
		mac_obj->check_del = ecore_check_mac_del;
		mac_obj->check_add = ecore_check_mac_add;
		mac_obj->check_move = ecore_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &mac_obj->exe_queue, 1, qable_obj,
				     ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = ecore_set_one_mac_e2;
		mac_obj->check_del = ecore_check_mac_del;
		mac_obj->check_add = ecore_check_mac_add;
		mac_obj->check_move = ecore_check_move;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = ecore_get_n_elements;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_mac);
	}
}

/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static void __storm_memset_mac_filters(struct bnx2x_softc *sc,
				       struct tstorm_eth_mac_filter_config
				       *mac_filters, uint16_t pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	uint32_t addr = BAR_TSTRORM_INTMEM +
	    TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
}

static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
				 struct ecore_rx_mode_ramrod_params *p)
{
	/* update the sc MAC filter structure */
	uint32_t mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
	    (struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	uint8_t unmatched_unicast = 0;

	/* In e1x we only take the rx accept flags into account, since
	 * tx switching isn't enabled.
	 */
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
	    mac_filters->ucast_drop_all | mask :
	    mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
	    mac_filters->mcast_drop_all | mask :
	    mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
	    mac_filters->ucast_accept_all | mask :
	    mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
	    mac_filters->mcast_accept_all | mask :
	    mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
	    mac_filters->bcast_accept_all | mask :
	    mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
	    mac_filters->unmatched_unicast | mask :
	    mac_filters->unmatched_unicast & ~mask;

	ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
		  "accp_mcast 0x%x accp_bcast 0x%x",
		  mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
		  mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
		  mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(sc, mac_filters, p->func_id);

	/* The operation is completed */
	ECORE_CLEAR_BIT(p->state, p->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}

/* Setup ramrod data */
static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
					   struct eth_classify_header *hdr,
					   uint8_t rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32(cid);
	hdr->rule_cnt = rule_cnt;
}

static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags,
					   struct eth_filter_rules_cmd *cmd,
					   int clear_accept_all)
{
	uint16_t state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
	    ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = ECORE_CPU_TO_LE16(state);
}

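/* Worked example (editor's note): for the flag set {ACCEPT_UNICAST,
 * ACCEPT_ALL_MULTICAST, ACCEPT_BROADCAST} the computation above clears both
 * DROP_ALL bits and yields
 *
 *	state == ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
 *		 ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
 *
 * i.e. matched unicast, all multicast and all broadcast pass, while unmatched
 * unicast is still filtered (UCAST_ACCEPT_UNMATCHED is not set).
 */
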
2078 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2079 struct ecore_rx_mode_ramrod_params *p)
2081 struct eth_filter_rules_ramrod_data *data = p->rdata;
2083 uint8_t rule_idx = 0;
2085 /* Reset the ramrod data buffer */
2086 ECORE_MEMSET(data, 0, sizeof(*data));
2088 /* Setup ramrod data */
2090 /* Tx (internal switching) */
2091 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2092 data->rules[rule_idx].client_id = p->cl_id;
2093 data->rules[rule_idx].func_id = p->func_id;
2095 data->rules[rule_idx].cmd_general_data =
2096 ETH_FILTER_RULES_CMD_TX_CMD;
2098 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2099 &(data->rules[rule_idx++]),
2104 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2105 data->rules[rule_idx].client_id = p->cl_id;
2106 data->rules[rule_idx].func_id = p->func_id;
2108 data->rules[rule_idx].cmd_general_data =
2109 ETH_FILTER_RULES_CMD_RX_CMD;
2111 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2112 &(data->rules[rule_idx++]),
2116 /* If FCoE Queue configuration has been requested configure the Rx and
2117 * internal switching modes for this queue in separate rules.
2119 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2120 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2122 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2123 /* Tx (internal switching) */
2124 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2125 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2126 data->rules[rule_idx].func_id = p->func_id;
2128 data->rules[rule_idx].cmd_general_data =
2129 ETH_FILTER_RULES_CMD_TX_CMD;
2131 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2133 [rule_idx++]), TRUE);
2137 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2138 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2139 data->rules[rule_idx].func_id = p->func_id;
2141 data->rules[rule_idx].cmd_general_data =
2142 ETH_FILTER_RULES_CMD_RX_CMD;
2144 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2146 [rule_idx++]), TRUE);
2150 /* Set the ramrod header (most importantly - number of rules to
2153 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2156 (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2157 data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2159 /* No explicit memory barrier is needed here: the ordering of writing
2160 * to the SPQ element and updating the SPQ producer (which involves a
2161 * memory read) is guaranteed by the full memory barrier inside
2162 * ecore_sp_post().
2163 */
2167 rc = ecore_sp_post(sc,
2168 RAMROD_CMD_ID_ETH_FILTER_RULES,
2169 p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2170 if (rc)
2171 return rc;
2173 /* Ramrod completion is pending */
2174 return ECORE_PENDING;
2177 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2178 struct ecore_rx_mode_ramrod_params *p)
2180 return ecore_state_wait(sc, p->state, p->pstate);
2183 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2184 __rte_unused struct
2185 ecore_rx_mode_ramrod_params *p)
2188 return ECORE_SUCCESS;
2191 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2192 struct ecore_rx_mode_ramrod_params *p)
2194 int rc;
2196 /* Configure the new classification in the chip */
2197 if (p->rx_mode_obj->config_rx_mode) {
2198 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2199 if (rc < 0)
2200 return rc;
2202 /* Wait for a ramrod completion if was requested */
2203 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2204 rc = p->rx_mode_obj->wait_comp(sc, p);
2205 if (rc)
2206 return rc;
2207 }
2208 } else {
2209 ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
2210 return -1;
2211 }
2213 return rc;
2216 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2218 if (CHIP_IS_E1x(sc)) {
2219 o->wait_comp = ecore_empty_rx_mode_wait;
2220 o->config_rx_mode = ecore_set_rx_mode_e1x;
2221 } else {
2222 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2223 o->config_rx_mode = ecore_set_rx_mode_e2;
2224 }
2227 /********************* Multicast verbs: SET, CLEAR ****************************/
2228 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2230 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
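/* Editor's note: a hedged illustration (not from the original source) of the
 * bin mapping above: the CRC32-LE over the 6-byte MAC is computed and bits
 * 31:24 of the result select one of 256 approximate-match bins:
 *
 *	uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	uint8_t bin = ecore_mcast_bin_from_mac(mac);	(0 <= bin <= 255)
 *
 * Several MACs may hash to the same bin, which is why the registry below
 * only tracks an approximate match and a DEL may clear a "wrong" bin.
 */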
2233 struct ecore_mcast_mac_elem {
2234 ecore_list_entry_t link;
2235 uint8_t mac[ETH_ALEN];
2236 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2239 struct ecore_pending_mcast_cmd {
2240 ecore_list_entry_t link;
2241 int type; /* ECORE_MCAST_CMD_X */
2242 union {
2243 ecore_list_t macs_head;
2244 uint32_t macs_num; /* Needed for DEL command */
2245 int next_bin; /* Needed for RESTORE flow with approx match */
2246 } data;
2247 int alloc_len; /* total allocation size, passed to ECORE_FREE() */
2248 int done; /* Set to TRUE when the command has been handled. In
2249 * practice this is used only for 57712, where one pending
2250 * command may be handled in a few operations; on other
2251 * chips every operation completes in a single ramrod, so
2252 * the field is not needed there.
2253 */
2254 };
2256 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2258 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2259 o->raw.wait_comp(sc, &o->raw))
2260 return ECORE_TIMEOUT;
2262 return ECORE_SUCCESS;
2265 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2266 struct ecore_mcast_obj *o,
2267 struct ecore_mcast_ramrod_params *p,
2268 enum ecore_mcast_cmd cmd)
2270 int total_sz;
2271 struct ecore_pending_mcast_cmd *new_cmd;
2272 struct ecore_mcast_mac_elem *cur_mac = NULL;
2273 struct ecore_mcast_list_elem *pos;
2274 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2275 p->mcast_list_len : 0);
2277 /* If the command is empty ("handle pending commands only"), return */
2278 if (!p->mcast_list_len)
2279 return ECORE_SUCCESS;
2281 total_sz = sizeof(*new_cmd) +
2282 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2284 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2285 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2287 if (!new_cmd)
2288 return ECORE_NOMEM;
2290 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2291 cmd, macs_list_len);
2293 ECORE_LIST_INIT(&new_cmd->data.macs_head);
2294 new_cmd->alloc_len = total_sz;
2295 new_cmd->type = cmd;
2296 new_cmd->done = FALSE;
2298 switch (cmd) {
2299 case ECORE_MCAST_CMD_ADD:
2300 cur_mac = (struct ecore_mcast_mac_elem *)
2301 ((uint8_t *) new_cmd + sizeof(*new_cmd));
2303 /* Push the MACs of the current command into the pending command
2306 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2307 struct ecore_mcast_list_elem) {
2308 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2309 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2310 &new_cmd->data.macs_head);
2311 cur_mac++;
2312 }
2314 break;
2316 case ECORE_MCAST_CMD_DEL:
2317 new_cmd->data.macs_num = p->mcast_list_len;
2318 break;
2320 case ECORE_MCAST_CMD_RESTORE:
2321 new_cmd->data.next_bin = 0;
2322 break;
2324 default:
2325 ECORE_FREE(sc, new_cmd, total_sz);
2326 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2327 return ECORE_INVAL;
2328 }
2330 /* Push the new pending command to the tail of the pending list: FIFO */
2331 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2333 o->set_sched(o);
2335 return ECORE_PENDING;
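/* Editor's note: a sketch (not part of the original file) of the single
 * allocation built above for an ADD of N MACs:
 *
 *	+---------------------------------+ <- new_cmd
 *	| struct ecore_pending_mcast_cmd  |
 *	+---------------------------------+ <- (uint8_t *)new_cmd + sizeof(*new_cmd)
 *	| struct ecore_mcast_mac_elem [N] |
 *	+---------------------------------+
 *
 * cur_mac walks the trailing array while every element is also linked into
 * new_cmd->data.macs_head, so a single ECORE_FREE() of alloc_len bytes
 * releases the command together with all of its MAC entries.
 */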
2339 * ecore_mcast_get_next_bin - get the next set bin (index)
2342 * @last: index to start looking from (inclusive)
2344 * returns the next found (set) bin or a negative value if none is found.
2346 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2348 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2350 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2351 if (o->registry.aprox_match.vec[i])
2352 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2353 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2354 if (BIT_VEC64_TEST_BIT
2355 (o->registry.aprox_match.vec, cur_bit)) {
2356 return cur_bit;
2357 }
2358 }
2359 inner_start = 0;
2360 }
2362 /* None found */
2363 return -1;
2367 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2371 * returns the index of the found bin or -1 if none is found
2373 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2375 int cur_bit = ecore_mcast_get_next_bin(o, 0);
2377 if (cur_bit >= 0)
2378 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2380 return cur_bit;
2383 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2385 struct ecore_raw_obj *raw = &o->raw;
2386 uint8_t rx_tx_flag = 0;
2388 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2389 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2390 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2392 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2393 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2394 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2396 return rx_tx_flag;
2399 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2400 struct ecore_mcast_obj *o, int idx,
2401 union ecore_mcast_config_data *cfg_data,
2402 enum ecore_mcast_cmd cmd)
2404 struct ecore_raw_obj *r = &o->raw;
2405 struct eth_multicast_rules_ramrod_data *data =
2406 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2407 uint8_t func_id = r->func_id;
2408 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2409 int bin;
2411 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2412 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2414 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2416 /* Get a bin and update the bins vector */
2417 switch (cmd) {
2418 case ECORE_MCAST_CMD_ADD:
2419 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2420 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2421 break;
2423 case ECORE_MCAST_CMD_DEL:
2424 /* If there were no more bins to clear
2425 * (ecore_mcast_clear_first_bin() returns -1) then we would
2426 * clear any (0xff) bin.
2427 * See ecore_mcast_validate_e2() for an explanation of when it may
2428 * happen.
2429 */
2430 bin = ecore_mcast_clear_first_bin(o);
2431 break;
2433 case ECORE_MCAST_CMD_RESTORE:
2434 bin = cfg_data->bin;
2435 break;
2437 default:
2438 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2439 return;
2440 }
2442 ECORE_MSG(sc, "%s bin %d",
2443 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2444 "Setting" : "Clearing"), bin);
2446 data->rules[idx].bin_id = (uint8_t) bin;
2447 data->rules[idx].func_id = func_id;
2448 data->rules[idx].engine_id = o->engine_id;
2452 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2454 * @sc: device handle
2456 * @start_bin: index in the registry to start from (inclusive)
2457 * @rdata_idx: index in the ramrod data to start from
2459 * returns last handled bin index or -1 if all bins have been handled
2461 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2462 struct ecore_mcast_obj *o,
2463 int start_bin, int *rdata_idx)
2465 int cur_bin, cnt = *rdata_idx;
2466 union ecore_mcast_config_data cfg_data = { NULL };
2468 /* go through the registry and configure the bins from it */
2469 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2470 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2472 cfg_data.bin = (uint8_t) cur_bin;
2473 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2475 cnt++;
2477 ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
2479 /* Break if we reached the maximum number
2480 * of rules.
2481 */
2482 if (cnt >= o->max_cmd_len)
2483 break;
2484 }
2486 *rdata_idx = cnt;
2488 return cur_bin;
2491 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2492 struct ecore_mcast_obj *o,
2493 struct ecore_pending_mcast_cmd
2494 *cmd_pos, int *line_idx)
2496 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2497 int cnt = *line_idx;
2498 union ecore_mcast_config_data cfg_data = { NULL };
2500 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2501 &cmd_pos->data.macs_head, link,
2502 struct ecore_mcast_mac_elem) {
2504 cfg_data.mac = &pmac_pos->mac[0];
2505 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2507 cnt++;
2509 ECORE_MSG
2510 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2511 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2512 pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2514 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2515 &cmd_pos->data.macs_head);
2517 /* Break if we reached the maximum number
2518 * of rules.
2519 */
2520 if (cnt >= o->max_cmd_len)
2521 break;
2522 }
2524 *line_idx = cnt;
2526 /* if no more MACs to configure - we are done */
2527 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2528 cmd_pos->done = TRUE;
2531 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2532 struct ecore_mcast_obj *o,
2533 struct ecore_pending_mcast_cmd
2534 *cmd_pos, int *line_idx)
2536 int cnt = *line_idx;
2538 while (cmd_pos->data.macs_num) {
2539 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2541 cnt++;
2543 cmd_pos->data.macs_num--;
2545 ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d",
2546 cmd_pos->data.macs_num, cnt);
2548 /* Break if we reached the maximum
2549 * number of rules.
2550 */
2551 if (cnt >= o->max_cmd_len)
2552 break;
2553 }
2555 *line_idx = cnt;
2557 /* If we cleared all bins - we are done */
2558 if (!cmd_pos->data.macs_num)
2559 cmd_pos->done = TRUE;
2562 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2563 struct ecore_mcast_obj *o, struct
2564 ecore_pending_mcast_cmd
2565 *cmd_pos, int *line_idx)
2567 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2568 line_idx);
2570 if (cmd_pos->data.next_bin < 0)
2571 /* If o->set_restore returned -1 we are done */
2572 cmd_pos->done = TRUE;
2573 else
2574 /* Start from the next bin next time */
2575 cmd_pos->data.next_bin++;
2578 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2579 ecore_mcast_ramrod_params
2580 *p)
2582 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2583 int cnt = 0;
2584 struct ecore_mcast_obj *o = p->mcast_obj;
2586 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2587 &o->pending_cmds_head, link,
2588 struct ecore_pending_mcast_cmd) {
2589 switch (cmd_pos->type) {
2590 case ECORE_MCAST_CMD_ADD:
2591 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2594 case ECORE_MCAST_CMD_DEL:
2595 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2598 case ECORE_MCAST_CMD_RESTORE:
2599 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2600 &cnt);
2601 break;
2603 default:
2604 PMD_DRV_LOG(ERR, sc,
2605 "Unknown command: %d", cmd_pos->type);
2606 return ECORE_INVAL;
2607 }
2609 /* If the command has been completed - remove it from the list
2610 * and free the memory
2612 if (cmd_pos->done) {
2613 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2614 &o->pending_cmds_head);
2615 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2618 /* Break if we reached the maximum number of rules */
2619 if (cnt >= o->max_cmd_len)
2620 break;
2621 }
2623 return cnt;
2626 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2627 struct ecore_mcast_obj *o,
2628 struct ecore_mcast_ramrod_params *p,
2631 struct ecore_mcast_list_elem *mlist_pos;
2632 union ecore_mcast_config_data cfg_data = { NULL };
2633 int cnt = *line_idx;
2635 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2636 struct ecore_mcast_list_elem) {
2637 cfg_data.mac = mlist_pos->mac;
2638 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2640 cnt++;
2642 ECORE_MSG
2643 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2644 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2645 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2646 }
2648 *line_idx = cnt;
2651 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2652 struct ecore_mcast_obj *o,
2653 struct ecore_mcast_ramrod_params *p,
2656 int cnt = *line_idx, i;
2658 for (i = 0; i < p->mcast_list_len; i++) {
2659 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2661 cnt++;
2663 ECORE_MSG(sc,
2664 "Deleting MAC. %d left", p->mcast_list_len - i - 1);
2665 }
2667 *line_idx = cnt;
2671 * ecore_mcast_handle_current_cmd - send the current command if there is room
2673 * @sc: device handle
2676 * @start_cnt: first line in the ramrod data that may be used
2678 * This function is called if there is enough room for the current
2679 * command in the ramrod data.
2680 * Returns number of lines filled in the ramrod data in total.
2682 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2683 ecore_mcast_ramrod_params *p,
2684 enum ecore_mcast_cmd cmd,
2687 struct ecore_mcast_obj *o = p->mcast_obj;
2688 int cnt = start_cnt;
2690 ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
2693 case ECORE_MCAST_CMD_ADD:
2694 ecore_mcast_hdl_add(sc, o, p, &cnt);
2697 case ECORE_MCAST_CMD_DEL:
2698 ecore_mcast_hdl_del(sc, o, p, &cnt);
2701 case ECORE_MCAST_CMD_RESTORE:
2702 o->hdl_restore(sc, o, 0, &cnt);
2706 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2710 /* The current command has been handled */
2711 p->mcast_list_len = 0;
2713 return cnt;
2716 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2717 struct ecore_mcast_ramrod_params *p,
2718 enum ecore_mcast_cmd cmd)
2720 struct ecore_mcast_obj *o = p->mcast_obj;
2721 int reg_sz = o->get_registry_size(o);
2723 switch (cmd) {
2724 /* DEL command deletes all currently configured MACs */
2725 case ECORE_MCAST_CMD_DEL:
2726 o->set_registry_size(o, 0);
2727 /* Don't break */
2729 /* RESTORE command will restore the entire multicast configuration */
2730 case ECORE_MCAST_CMD_RESTORE:
2731 /* Here we set the approximate amount of work to do, which in
2732 * fact may be only less as some MACs in postponed ADD
2733 * command(s) scheduled before this command may fall into
2734 * the same bin and the actual number of bins set in the
2735 * registry would be less than we estimated here. See
2736 * ecore_mcast_set_one_rule_e2() for further details.
2738 p->mcast_list_len = reg_sz;
2739 break;
2741 case ECORE_MCAST_CMD_ADD:
2742 case ECORE_MCAST_CMD_CONT:
2743 /* Here we assume that all new MACs will fall into new bins.
2744 * However we will correct the real registry size after we
2745 * handle all pending commands.
2747 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2748 break;
2750 default:
2751 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2752 return ECORE_INVAL;
2753 }
2755 /* Increase the total number of MACs pending to be configured */
2756 o->total_pending_num += p->mcast_list_len;
2758 return ECORE_SUCCESS;
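/* Editor's note: an illustrative walk-through (not from the original source)
 * of the sizing logic above, assuming 3 bins are currently set (reg_sz == 3):
 *
 *	ADD of 5 MACs: set_registry_size(o, 3 + 5) - every MAC is assumed to
 *	               open a new bin; corrected later by
 *	               ecore_mcast_refresh_registry_e2()
 *	DEL:           set_registry_size(o, 0), then falls through so that
 *	               p->mcast_list_len = 3 (one line of work per set bin)
 *	RESTORE:       p->mcast_list_len = 3
 *
 * Over-estimation is harmless: if pending ADDs collided into one bin, a
 * later DEL simply clears a "random" (0xff) bin, as noted in
 * ecore_mcast_set_one_rule_e2().
 */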
2761 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2762 struct ecore_mcast_ramrod_params *p,
2765 struct ecore_mcast_obj *o = p->mcast_obj;
2767 o->set_registry_size(o, old_num_bins);
2768 o->total_pending_num -= p->mcast_list_len;
2772 * ecore_mcast_set_rdata_hdr_e2 - sets the header values
2774 * @sc: device handle
2776 * @len: number of rules to handle
2778 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
2779 *sc, struct ecore_mcast_ramrod_params
2780 *p, uint8_t len)
2782 struct ecore_raw_obj *r = &p->mcast_obj->raw;
2783 struct eth_multicast_rules_ramrod_data *data =
2784 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2786 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2787 (ECORE_FILTER_MCAST_PENDING <<
2788 ECORE_SWCID_SHIFT));
2789 data->header.rule_cnt = len;
2793 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2795 * @sc: device handle
2798 * Recalculate the actual number of set bins in the registry using Brian
2799 * Kernighan's algorithm: its execution complexity equals the number of set bins.
2801 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2803 uint64_t elem;
2804 int i, cnt = 0;
2806 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2807 elem = o->registry.aprox_match.vec[i];
2808 for (; elem; cnt++)
2809 elem &= elem - 1;
2810 }
2812 o->set_registry_size(o, cnt);
2814 return ECORE_SUCCESS;
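/* Editor's note: an illustrative trace (not from the original file) of the
 * bit-counting loop above on a single 64-bit registry element:
 *
 *	elem = 0xb (binary 1011, three set bins)
 *	elem &= elem - 1  ->  1010  (cnt = 1)
 *	elem &= elem - 1  ->  1000  (cnt = 2)
 *	elem &= elem - 1  ->  0000  (cnt = 3)
 *
 * Each step clears exactly the lowest set bit, so the cost is one iteration
 * per set bin rather than one per bit position.
 */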
2817 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2818 struct ecore_mcast_ramrod_params *p,
2819 enum ecore_mcast_cmd cmd)
2821 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2822 struct ecore_mcast_obj *o = p->mcast_obj;
2823 struct eth_multicast_rules_ramrod_data *data =
2824 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2825 int cnt = 0, rc;
2827 /* Reset the ramrod data buffer */
2828 ECORE_MEMSET(data, 0, sizeof(*data));
2830 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2832 /* If there are no more pending commands - clear SCHEDULED state */
2833 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2834 o->clear_sched(o);
2836 /* The below may be TRUE if there was enough room in ramrod
2837 * data for all pending commands and for the current
2838 * command. Otherwise the current command would have been added
2839 * to the pending commands and p->mcast_list_len would have been
2840 * zeroed.
2841 */
2842 if (p->mcast_list_len > 0)
2843 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2845 /* We've pulled out some MACs - update the total number of
2846 * outstanding.
2847 */
2848 o->total_pending_num -= cnt;
2851 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2852 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2854 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2856 /* Update a registry size if there are no more pending operations.
2858 * We don't want to change the value of the registry size if there are
2859 * pending operations because we want it to always be equal to the
2860 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2861 * set bins after the last requested operation in order to properly
2862 * evaluate the size of the next DEL/RESTORE operation.
2864 * Note that we update the registry itself during command(s) handling
2865 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2866 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2867 * with a limited amount of update commands (per MAC/bin) and we don't
2868 * know in this scope what the actual state of bins configuration is
2869 * going to be after this ramrod.
2871 if (!o->total_pending_num)
2872 ecore_mcast_refresh_registry_e2(o);
2874 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2875 * RAMROD_PENDING status immediately.
2877 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2878 raw->clear_pending(raw);
2879 return ECORE_SUCCESS;
2881 /* No explicit memory barrier is needed here: the ordering of writing
2882 * to the SPQ element and updating the SPQ producer (which involves a
2883 * memory read) is guaranteed by the full memory barrier inside
2884 * ecore_sp_post().
2885 */
2889 rc = ecore_sp_post(sc,
2890 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2891 raw->cid,
2892 raw->rdata_mapping, ETH_CONNECTION_TYPE);
2893 if (rc)
2894 return rc;
2896 /* Ramrod completion is pending */
2897 return ECORE_PENDING;
2901 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2902 struct ecore_mcast_ramrod_params *p,
2903 enum ecore_mcast_cmd cmd)
2905 /* Mark, that there is a work to do */
2906 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2907 p->mcast_list_len = 1;
2909 return ECORE_SUCCESS;
2912 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2913 __rte_unused struct ecore_mcast_ramrod_params
2914 *p, __rte_unused int old_num_bins)
2919 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2920 do { \
2921 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2922 } while (0)
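/* Editor's note: a hedged illustration of the macro above. The 256-bin
 * approximate-match filter is kept as ECORE_MC_HASH_SIZE 32-bit words;
 * bin 77, for example, lands in word 77 >> 5 == 2 at bit 77 & 0x1f == 13:
 *
 *	uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
 *
 *	ECORE_57711_SET_MC_FILTER(mc_filter, 77);
 *	(mc_filter[2] now equals 1 << 13)
 */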
2924 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2925 struct ecore_mcast_obj *o,
2926 struct ecore_mcast_ramrod_params *p,
2927 uint32_t * mc_filter)
2929 struct ecore_mcast_list_elem *mlist_pos;
2930 int bit;
2932 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2933 struct ecore_mcast_list_elem) {
2934 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2935 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2938 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2939 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2940 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2941 bit);
2943 /* bookkeeping... */
2944 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2948 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2950 struct ecore_mcast_obj *o,
2951 uint32_t * mc_filter)
2953 int bit;
2955 for (bit = ecore_mcast_get_next_bin(o, 0);
2956 bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2957 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2958 ECORE_MSG(sc, "About to set bin %d", bit);
2962 /* On 57711 we write the multicast MACs' approximate match
2963 * table directly into the TSTORM's internal RAM, so we don't
2964 * need any extra tricks to make it work.
2965 */
2966 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2967 struct ecore_mcast_ramrod_params *p,
2968 enum ecore_mcast_cmd cmd)
2970 int i;
2971 struct ecore_mcast_obj *o = p->mcast_obj;
2972 struct ecore_raw_obj *r = &o->raw;
2974 /* If CLEAR_ONLY has been requested - clear the registry
2975 * and clear a pending bit.
2977 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2978 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2980 /* Set the multicast filter bits before writing it into
2981 * the internal memory.
2982 */
2983 switch (cmd) {
2984 case ECORE_MCAST_CMD_ADD:
2985 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2986 break;
2988 case ECORE_MCAST_CMD_DEL:
2989 ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2991 /* clear the registry */
2992 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2993 sizeof(o->registry.aprox_match.vec));
2994 break;
2996 case ECORE_MCAST_CMD_RESTORE:
2997 ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
2998 break;
3000 default:
3001 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
3002 return ECORE_INVAL;
3003 }
3005 /* Set the mcast filter in the internal memory */
3006 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3007 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3008 } else
3009 /* clear the registry */
3010 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3011 sizeof(o->registry.aprox_match.vec));
3014 r->clear_pending(r);
3016 return ECORE_SUCCESS;
3019 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3021 return o->registry.aprox_match.num_bins_set;
3024 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3025 int n)
3027 o->registry.aprox_match.num_bins_set = n;
3030 int ecore_config_mcast(struct bnx2x_softc *sc,
3031 struct ecore_mcast_ramrod_params *p,
3032 enum ecore_mcast_cmd cmd)
3034 struct ecore_mcast_obj *o = p->mcast_obj;
3035 struct ecore_raw_obj *r = &o->raw;
3036 int rc = 0, old_reg_size;
3038 /* This is needed to recover number of currently configured mcast macs
3039 * in case of failure.
3041 old_reg_size = o->get_registry_size(o);
3043 /* Do some calculations and checks */
3044 rc = o->validate(sc, p, cmd);
3045 if (rc)
3046 return rc;
3048 /* Return if there is no work to do */
3049 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3050 return ECORE_SUCCESS;
3053 (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3054 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3056 /* Enqueue the current command to the pending list if we can't complete
3057 * it in the current iteration
3059 if (r->check_pending(r) ||
3060 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3061 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3062 if (rc < 0)
3063 goto error_exit1;
3065 /* As long as the current command is in a command list we
3066 * don't need to handle it separately.
3068 p->mcast_list_len = 0;
3071 if (!r->check_pending(r)) {
3073 /* Set 'pending' state */
3074 r->set_pending(r);
3076 /* Configure the new classification in the chip */
3077 rc = o->config_mcast(sc, p, cmd);
3078 if (rc < 0)
3079 goto error_exit2;
3081 /* Wait for a ramrod completion if was requested */
3082 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3083 rc = o->wait_comp(sc, o);
3084 }
3086 return rc;
3088 error_exit2:
3089 r->clear_pending(r);
3091 error_exit1:
3092 o->revert(sc, p, old_reg_size);
3094 return rc;
3097 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3099 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3100 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3101 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3104 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3106 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3107 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3108 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3111 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3113 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3116 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3118 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3121 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3122 struct ecore_mcast_obj *mcast_obj,
3123 uint8_t mcast_cl_id, uint32_t mcast_cid,
3124 uint8_t func_id, uint8_t engine_id, void *rdata,
3125 ecore_dma_addr_t rdata_mapping, int state,
3126 unsigned long *pstate, ecore_obj_type type)
3128 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3130 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3131 rdata, rdata_mapping, state, pstate, type);
3133 mcast_obj->engine_id = engine_id;
3135 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3137 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3138 mcast_obj->check_sched = ecore_mcast_check_sched;
3139 mcast_obj->set_sched = ecore_mcast_set_sched;
3140 mcast_obj->clear_sched = ecore_mcast_clear_sched;
3142 if (CHIP_IS_E1H(sc)) {
3143 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3144 mcast_obj->enqueue_cmd = NULL;
3145 mcast_obj->hdl_restore = NULL;
3146 mcast_obj->check_pending = ecore_mcast_check_pending;
3148 /* 57711 doesn't send a ramrod, so it has unlimited credit
3149 * for one command.
3150 */
3151 mcast_obj->max_cmd_len = -1;
3152 mcast_obj->wait_comp = ecore_mcast_wait;
3153 mcast_obj->set_one_rule = NULL;
3154 mcast_obj->validate = ecore_mcast_validate_e1h;
3155 mcast_obj->revert = ecore_mcast_revert_e1h;
3156 mcast_obj->get_registry_size =
3157 ecore_mcast_get_registry_size_aprox;
3158 mcast_obj->set_registry_size =
3159 ecore_mcast_set_registry_size_aprox;
3160 } else {
3161 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3162 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3163 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3164 mcast_obj->check_pending = ecore_mcast_check_pending;
3165 mcast_obj->max_cmd_len = 16;
3166 mcast_obj->wait_comp = ecore_mcast_wait;
3167 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3168 mcast_obj->validate = ecore_mcast_validate_e2;
3169 mcast_obj->revert = ecore_mcast_revert_e2;
3170 mcast_obj->get_registry_size =
3171 ecore_mcast_get_registry_size_aprox;
3172 mcast_obj->set_registry_size =
3173 ecore_mcast_set_registry_size_aprox;
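/* Editor's note: a hedged usage sketch (not from the original source) of the
 * multicast object initialized above; the "mcast_obj" placement in the softc
 * and the list setup are illustrative only:
 *
 *	struct ecore_mcast_ramrod_params p = { 0 };
 *
 *	p.mcast_obj = &sc->mcast_obj;
 *	(chain struct ecore_mcast_list_elem entries into p.mcast_list)
 *	p.mcast_list_len = n;
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = ecore_config_mcast(sc, &p, ECORE_MCAST_CMD_ADD);
 *
 * ECORE_SUCCESS means the change is fully applied; ECORE_PENDING means a
 * ramrod completion is still outstanding (only without RAMROD_COMP_WAIT).
 */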
3177 /*************************** Credit handling **********************************/
3180 * atomic_add_ifless - add if the result is less than a given value.
3182 * @v: pointer of type ecore_atomic_t
3183 * @a: the amount to add to v...
3184 * @u: ...if (v + a) is less than u.
3186 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3189 static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u)
3191 int c, old;
3193 c = ECORE_ATOMIC_READ(v);
3194 for (;;) {
3195 if (ECORE_UNLIKELY(c + a >= u))
3196 return FALSE;
3198 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3199 if (ECORE_LIKELY(old == c))
3200 break;
3201 c = old;
3202 }
3204 return TRUE;
3208 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3210 * @v: pointer of type ecore_atomic_t
3211 * @a: the amount to dec from v...
3212 * @u: ...if (v - a) is greater than or equal to u.
3214 * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3215 * otherwise.
3217 static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u)
3219 int c, old;
3221 c = ECORE_ATOMIC_READ(v);
3222 for (;;) {
3223 if (ECORE_UNLIKELY(c - a < u))
3224 return FALSE;
3226 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3227 if (ECORE_LIKELY(old == c))
3228 break;
3229 c = old;
3230 }
3232 return TRUE;
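/* Editor's note: an illustrative trace (not from the original source) of the
 * compare-and-swap loop above as the credit pool uses it. With
 * o->credit == 5, ecore_credit_pool_get(o, 2) runs
 * __atomic_dec_ifmoe(&o->credit, 2, 0):
 *
 *	c = 5; since 5 - 2 >= 0, try CMPXCHG(credit, 5, 3)
 *	- unraced: old == 5 == c -> break, return TRUE (credit is now 3)
 *	- raced:   old != c      -> c = old, re-check the bound and retry
 *
 * The retry loop makes get/put lock-free: credit is only consumed when the
 * CMPXCHG observes exactly the value the bounds check was made against.
 */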
3235 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3237 int rc;
3239 ECORE_SMP_MB();
3240 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3241 ECORE_SMP_MB();
3243 return rc;
3246 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3249 int rc;
3251 ECORE_SMP_MB();
3252 /* Don't allow a refill if credit + cnt > pool_sz */
3253 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3254 ECORE_SMP_MB();
3256 return rc;
3260 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3262 int cur_credit;
3264 ECORE_SMP_MB();
3265 cur_credit = ECORE_ATOMIC_READ(&o->credit);
3267 return cur_credit;
3270 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3271 ecore_credit_pool_obj *o,
3272 __rte_unused int cnt)
3274 return TRUE;
3277 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3278 int *offset)
3280 int idx, vec, i;
3282 *offset = -1;
3284 /* Find "internal cam-offset" then add to base for this object... */
3285 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3287 /* Skip the current vector if there are no free entries in it */
3288 if (!o->pool_mirror[vec])
3289 continue;
3291 /* If we've got here we are going to find a free entry */
3292 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3293 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3295 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3297 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3298 *offset = o->base_pool_offset + idx;
3299 return TRUE;
3300 }
3301 }
3303 return FALSE;
3306 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3307 int offset)
3309 if (offset < o->base_pool_offset)
3310 return FALSE;
3312 offset -= o->base_pool_offset;
3314 if (offset >= o->pool_sz)
3315 return FALSE;
3317 /* Return the entry to the pool */
3318 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3320 return TRUE;
3323 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3324 ecore_credit_pool_obj *o,
3325 __rte_unused int offset)
3327 return TRUE;
3330 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3331 ecore_credit_pool_obj *o,
3332 __rte_unused int *offset)
3334 return TRUE;
3339 * ecore_init_credit_pool - initialize credit pool internals.
3342 * @base: Base entry in the CAM to use.
3343 * @credit: pool size.
3345 * If base is negative no CAM entries handling will be performed.
3346 * If credit is negative pool operations will always succeed (unlimited pool).
3349 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3350 int base, int credit)
3352 /* Zero the object first */
3353 ECORE_MEMSET(p, 0, sizeof(*p));
3355 /* Set the table to all 1s */
3356 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3358 /* Init a pool as full */
3359 ECORE_ATOMIC_SET(&p->credit, credit);
3361 /* The total pool size */
3362 p->pool_sz = credit;
3364 p->base_pool_offset = base;
3366 /* Commit the change */
3367 ECORE_SMP_MB();
3369 p->check = ecore_credit_pool_check;
3371 /* if pool credit is negative - disable the checks */
3372 if (credit >= 0) {
3373 p->put = ecore_credit_pool_put;
3374 p->get = ecore_credit_pool_get;
3375 p->put_entry = ecore_credit_pool_put_entry;
3376 p->get_entry = ecore_credit_pool_get_entry;
3377 } else {
3378 p->put = ecore_credit_pool_always_TRUE;
3379 p->get = ecore_credit_pool_always_TRUE;
3380 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3381 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3384 /* If base is negative - disable entries handling */
3385 if (base < 0) {
3386 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3387 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
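/* Editor's note: a hedged summary of the (base, credit) conventions handled
 * above; the concrete numbers are illustrative:
 *
 *	ecore_init_credit_pool(p, 0, 8);   8 credits, CAM entries 0..7 tracked
 *	ecore_init_credit_pool(p, -1, 8);  8 credits, no CAM entry bookkeeping
 *	ecore_init_credit_pool(p, 0, -1);  unlimited - get/put always TRUE
 *	ecore_init_credit_pool(p, 0, 0);   empty pool - every get/put fails
 *
 * The MAC and VLAN pool initializers below choose between these forms based
 * on the chip family and the number of active functions.
 */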
3391 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3392 struct ecore_credit_pool_obj *p,
3393 uint8_t func_id, uint8_t func_num)
3396 #define ECORE_CAM_SIZE_EMUL 5
3398 int cam_sz;
3400 if (CHIP_IS_E1H(sc)) {
3401 /* CAM credit is equally divided between all active functions
3402 * on the port.
3403 */
3404 if (func_num > 0) {
3405 if (!CHIP_REV_IS_SLOW(sc))
3406 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3407 else
3408 cam_sz = ECORE_CAM_SIZE_EMUL;
3409 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3410 } else {
3411 /* this should never happen! Block MAC operations. */
3412 ecore_init_credit_pool(p, 0, 0);
3413 }
3414 } else {
3416 /*
3417 * CAM credit is equally divided between all active functions
3418 * on the path.
3419 */
3420 if (func_num > 0) {
3422 if (!CHIP_REV_IS_SLOW(sc))
3423 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3424 else
3425 cam_sz = ECORE_CAM_SIZE_EMUL;
3427 /* No need for CAM entries handling for 57712 and
3428 * newer.
3429 */
3430 ecore_init_credit_pool(p, -1, cam_sz);
3431 } else {
3432 /* this should never happen! Block MAC operations. */
3433 ecore_init_credit_pool(p, 0, 0);
3434 }
3435 }
3438 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3439 struct ecore_credit_pool_obj *p,
3440 uint8_t func_id, uint8_t func_num)
3442 if (CHIP_IS_E1x(sc)) {
3443 /* There is no VLAN credit in HW on 57711, only
3444 * MAC / MAC-VLAN can be set
3445 */
3446 ecore_init_credit_pool(p, 0, -1);
3447 } else {
3448 /* CAM credit is equally divided between all active functions
3449 * on the path.
3450 */
3451 if (func_num > 0) {
3452 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3453 ecore_init_credit_pool(p, func_id * credit, credit);
3454 } else
3455 /* this should never happen! Block VLAN operations. */
3456 ecore_init_credit_pool(p, 0, 0);
3457 }
3460 /****************** RSS Configuration ******************/
3463 * ecore_setup_rss - configure RSS
3465 * @sc: device handle
3466 * @p: rss configuration
3468 * Sends an UPDATE ramrod for that matter.
3470 static int ecore_setup_rss(struct bnx2x_softc *sc,
3471 struct ecore_config_rss_params *p)
3473 struct ecore_rss_config_obj *o = p->rss_obj;
3474 struct ecore_raw_obj *r = &o->raw;
3475 struct eth_rss_update_ramrod_data *data =
3476 (struct eth_rss_update_ramrod_data *)(r->rdata);
3477 uint8_t rss_mode = 0;
3478 int rc;
3480 ECORE_MEMSET(data, 0, sizeof(*data));
3482 ECORE_MSG(sc, "Configuring RSS");
3484 /* Set an echo field */
3485 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3486 (r->state << ECORE_SWCID_SHIFT));
3489 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3490 rss_mode = ETH_RSS_MODE_DISABLED;
3491 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3492 rss_mode = ETH_RSS_MODE_REGULAR;
3494 data->rss_mode = rss_mode;
3496 ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3498 /* RSS capabilities */
3499 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3500 data->capabilities |=
3501 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3503 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3504 data->capabilities |=
3505 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3507 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3508 data->capabilities |=
3509 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3511 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3512 data->capabilities |=
3513 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3515 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3516 data->capabilities |=
3517 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3519 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3520 data->capabilities |=
3521 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3523 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3524 data->udp_4tuple_dst_port_mask =
3525 ECORE_CPU_TO_LE16(p->tunnel_mask);
3526 data->udp_4tuple_dst_port_value =
3527 ECORE_CPU_TO_LE16(p->tunnel_value);
3531 data->rss_result_mask = p->rss_result_mask;
3534 data->rss_engine_id = o->engine_id;
3536 ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3538 /* Indirection table */
3539 ECORE_MEMCPY(data->indirection_table, p->ind_table,
3540 T_ETH_INDIRECTION_TABLE_SIZE);
3542 /* Remember the last configuration */
3543 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3546 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3547 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3548 sizeof(data->rss_key));
3549 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3552 /* No explicit memory barrier is needed here: the ordering of writing
3553 * to the SPQ element and updating the SPQ producer (which involves a
3554 * memory read) is guaranteed by the full memory barrier inside
3555 * ecore_sp_post().
3556 */
3560 rc = ecore_sp_post(sc,
3561 RAMROD_CMD_ID_ETH_RSS_UPDATE,
3562 r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3564 if (rc < 0)
3565 return rc;
3567 return ECORE_PENDING;
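/* Editor's note: a hedged sketch (not from the original file) of driving the
 * RSS object configured above; the field values and the "rss_conf_obj"
 * placement in the softc are illustrative:
 *
 *	struct ecore_config_rss_params p = { 0 };
 *
 *	p.rss_obj = &sc->rss_conf_obj;
 *	ECORE_SET_BIT_NA(ECORE_RSS_MODE_REGULAR, &p.rss_flags);
 *	ECORE_SET_BIT_NA(ECORE_RSS_IPV4, &p.rss_flags);
 *	ECORE_SET_BIT_NA(ECORE_RSS_IPV4_TCP, &p.rss_flags);
 *	p.rss_result_mask = 0x7;	(8 hash buckets)
 *	(fill p.ind_table[] with the client IDs serving each bucket)
 *
 *	rc = ecore_config_rss(sc, &p);
 *
 * ecore_config_rss() below sets the pending bit, posts the RSS_UPDATE
 * ramrod built here and optionally waits for its completion.
 */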
3570 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3572 int rc;
3573 struct ecore_rss_config_obj *o = p->rss_obj;
3574 struct ecore_raw_obj *r = &o->raw;
3576 /* Do nothing if only driver cleanup was requested */
3577 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3578 return ECORE_SUCCESS;
3580 r->set_pending(r);
3582 rc = o->config_rss(sc, p);
3583 if (rc < 0) {
3584 r->clear_pending(r);
3585 return rc;
3586 }
3588 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3589 rc = r->wait_comp(sc, r);
3591 return rc;
3594 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3595 uint8_t cl_id, uint32_t cid, uint8_t func_id,
3596 uint8_t engine_id, void *rdata,
3597 ecore_dma_addr_t rdata_mapping, int state,
3598 unsigned long *pstate, ecore_obj_type type)
3600 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3601 rdata_mapping, state, pstate, type);
3603 rss_obj->engine_id = engine_id;
3604 rss_obj->config_rss = ecore_setup_rss;
3607 /********************** Queue state object ***********************************/
3610 * ecore_queue_state_change - perform Queue state change transition
3612 * @sc: device handle
3613 * @params: parameters to perform the transition
3615 * returns 0 in case of successfully completed transition, negative error
3616 * code in case of failure, positive (EBUSY) value if there is a completion
3617 * that is still pending (possible only if RAMROD_COMP_WAIT is
3618 * not set in params->ramrod_flags for asynchronous commands).
3621 int ecore_queue_state_change(struct bnx2x_softc *sc,
3622 struct ecore_queue_state_params *params)
3624 struct ecore_queue_sp_obj *o = params->q_obj;
3625 int rc, pending_bit;
3626 unsigned long *pending = &o->pending;
3628 /* Check that the requested transition is legal */
3629 rc = o->check_transition(sc, o, params);
3630 if (rc) {
3631 PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
3632 rc);
3633 return ECORE_INVAL;
3634 }
3636 /* Set "pending" bit */
3637 ECORE_MSG(sc, "pending bit was=%lx", o->pending);
3638 pending_bit = o->set_pending(o, params);
3639 ECORE_MSG(sc, "pending bit now=%lx", o->pending);
3641 /* Don't send a command if only driver cleanup was requested */
3642 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3643 o->complete_cmd(sc, o, pending_bit);
3644 else {
3646 rc = o->send_cmd(sc, params);
3647 if (rc) {
3648 o->next_state = ECORE_Q_STATE_MAX;
3649 ECORE_CLEAR_BIT(pending_bit, pending);
3650 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3651 return rc;
3652 }
3654 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3655 rc = o->wait_comp(sc, o, pending_bit);
3656 if (rc)
3657 return rc;
3659 return ECORE_SUCCESS;
3660 }
3661 }
3663 return ECORE_RET_PENDING(pending_bit, pending);
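/* Editor's note: a hedged usage sketch (not from the original source) of the
 * queue FSM entry point above, e.g. re-activating a configured queue; the
 * queue object placement ("fp") is hypothetical:
 *
 *	struct ecore_queue_state_params qp = { 0 };
 *
 *	qp.q_obj = &fp->q_obj;
 *	qp.cmd = ECORE_Q_CMD_ACTIVATE;
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &qp.ramrod_flags);
 *
 *	rc = ecore_queue_state_change(sc, &qp);
 *
 * check_transition() validates the request, set_pending() marks the
 * in-flight command (ACTIVATE maps onto ECORE_Q_CMD_UPDATE), send_cmd()
 * posts the ramrod, and with RAMROD_COMP_WAIT the call returns only after
 * the completion has been consumed.
 */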
3666 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3667 struct ecore_queue_state_params *params)
3669 enum ecore_queue_cmd cmd = params->cmd, bit;
3671 /* ACTIVATE and DEACTIVATE commands are implemented on top of
3672 * the UPDATE command.
3673 */
3674 if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3675 bit = ECORE_Q_CMD_UPDATE;
3676 else
3677 bit = cmd;
3679 ECORE_SET_BIT(bit, &obj->pending);
3681 return bit;
3683 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3684 struct ecore_queue_sp_obj *o,
3685 enum ecore_queue_cmd cmd)
3687 return ecore_state_wait(sc, cmd, &o->pending);
3691 * ecore_queue_comp_cmd - complete the state change command.
3693 * @sc: device handle
3697 * Checks that the arrived completion is expected.
3699 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3700 struct ecore_queue_sp_obj *o,
3701 enum ecore_queue_cmd cmd)
3703 unsigned long cur_pending = o->pending;
3705 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3706 PMD_DRV_LOG(ERR, sc,
3707 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3708 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3709 cur_pending, o->next_state);
3710 return ECORE_INVAL;
3711 }
3713 if (o->next_tx_only >= o->max_cos)
3714 /* >= because tx only must always be smaller than cos since the
3715 * primary connection supports COS 0.
3716 */
3717 PMD_DRV_LOG(ERR, sc,
3718 "illegal value for next tx_only: %d. max cos was %d",
3719 o->next_tx_only, o->max_cos);
3721 ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3722 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3724 if (o->next_tx_only) /* print num tx-only if any exist */
3725 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3726 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3728 o->state = o->next_state;
3729 o->num_tx_only = o->next_tx_only;
3730 o->next_state = ECORE_Q_STATE_MAX;
3732 /* It's important that o->state and o->next_state are
3733 * updated before o->pending.
3734 */
3737 ECORE_CLEAR_BIT(cmd, &o->pending);
3738 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3740 return ECORE_SUCCESS;
3743 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3744 *cmd_params,
3745 struct client_init_ramrod_data *data)
3747 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3751 /* IPv6 TPA supported for E2 and above only */
3752 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3753 &params->flags) *
3754 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3757 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3758 struct ecore_queue_sp_obj *o,
3759 struct ecore_general_setup_params
3760 *params, struct client_init_general_data
3761 *gen_data, unsigned long *flags)
3763 gen_data->client_id = o->cl_id;
3765 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3766 gen_data->statistics_counter_id = params->stat_id;
3767 gen_data->statistics_en_flg = 1;
3768 gen_data->statistics_zero_flg =
3769 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3770 } else
3771 gen_data->statistics_counter_id =
3772 DISABLE_STATISTIC_COUNTER_ID_VALUE;
3774 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3775 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3776 gen_data->sp_client_id = params->spcl_id;
3777 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3778 gen_data->func_id = o->func_id;
3780 gen_data->cos = params->cos;
3782 gen_data->traffic_type =
3783 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3784 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3786 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3787 gen_data->activate_flg, gen_data->cos,
3788 gen_data->statistics_en_flg);
3791 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3792 struct client_init_tx_data *tx_data,
3793 unsigned long *flags)
3795 tx_data->enforce_security_flg =
3796 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3797 tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3798 tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3799 tx_data->tx_switching_flg =
3800 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3801 tx_data->anti_spoofing_flg =
3802 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3803 tx_data->force_default_pri_flg =
3804 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3805 tx_data->refuse_outband_vlan_flg =
3806 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3807 tx_data->tunnel_non_lso_pcsum_location =
3808 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3809 CSUM_ON_BD;
3811 tx_data->tx_status_block_id = params->fw_sb_id;
3812 tx_data->tx_sb_index_number = params->sb_cq_index;
3813 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3815 tx_data->tx_bd_page_base.lo =
3816 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3817 tx_data->tx_bd_page_base.hi =
3818 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3820 /* Don't configure any Tx switching mode during queue SETUP */
3821 tx_data->state = 0;
3824 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3825 struct client_init_rx_data *rx_data)
3827 /* flow control data */
3828 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3829 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3830 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3831 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3832 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3833 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3834 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3837 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3838 struct client_init_rx_data *rx_data,
3839 unsigned long *flags)
3841 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3842 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3843 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3844 CLIENT_INIT_RX_DATA_TPA_MODE;
3845 rx_data->vmqueue_mode_en_flg = 0;
3847 rx_data->extra_data_over_sgl_en_flg =
3848 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3849 rx_data->cache_line_alignment_log_size = params->cache_line_log;
3850 rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3851 rx_data->client_qzone_id = params->cl_qzone_id;
3852 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3854 /* Always start in DROP_ALL mode */
3855 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3856 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3858 /* We don't set drop flags */
3859 rx_data->drop_ip_cs_err_flg = 0;
3860 rx_data->drop_tcp_cs_err_flg = 0;
3861 rx_data->drop_ttl0_flg = 0;
3862 rx_data->drop_udp_cs_err_flg = 0;
3863 rx_data->inner_vlan_removal_enable_flg =
3864 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3865 rx_data->outer_vlan_removal_enable_flg =
3866 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3867 rx_data->status_block_id = params->fw_sb_id;
3868 rx_data->rx_sb_index_number = params->sb_cq_index;
3869 rx_data->max_tpa_queues = params->max_tpa_queues;
3870 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3871 rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3872 rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3873 rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3874 rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3875 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3876 flags);
3878 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3879 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3880 rx_data->is_approx_mcast = 1;
3883 rx_data->rss_engine_id = params->rss_engine_id;
3885 /* silent vlan removal */
3886 rx_data->silent_vlan_removal_flg =
3887 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3888 rx_data->silent_vlan_value =
3889 ECORE_CPU_TO_LE16(params->silent_removal_value);
3890 rx_data->silent_vlan_mask =
3891 ECORE_CPU_TO_LE16(params->silent_removal_mask);
3894 /* initialize the general, tx and rx parts of a queue object */
3895 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3896 *cmd_params,
3897 struct client_init_ramrod_data *data)
3899 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3900 &cmd_params->params.setup.gen_params,
3901 &data->general,
3902 &cmd_params->params.setup.flags);
3904 ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3905 &data->tx, &cmd_params->params.setup.flags);
3907 ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3908 &data->rx, &cmd_params->params.setup.flags);
3910 ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3911 &data->rx);
3914 /* initialize the general and tx parts of a tx-only queue object */
3915 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3916 *cmd_params,
3917 struct tx_queue_init_ramrod_data *data)
3919 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3920 &cmd_params->params.tx_only.gen_params,
3921 &data->general,
3922 &cmd_params->params.tx_only.flags);
3924 ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3925 &data->tx, &cmd_params->params.tx_only.flags);
3927 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3928 cmd_params->q_obj->cids[0],
3929 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3933 * ecore_q_init - init HW/FW queue
3935 * @sc: device handle
3938 * HW/FW initial Queue configuration:
3939 * - HC: Rx and Tx
3940 * - CDU context validation
3941 * - FW magic number
3942 */
3943 static int ecore_q_init(struct bnx2x_softc *sc,
3944 struct ecore_queue_state_params *params)
3946 struct ecore_queue_sp_obj *o = params->q_obj;
3947 struct ecore_queue_init_params *init = &params->params.init;
3948 uint16_t hc_usec;
3949 uint8_t cos;
3951 /* Tx HC configuration */
3952 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3953 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3954 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3956 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3957 init->tx.sb_cq_index,
3958 !ECORE_TEST_BIT
3959 (ECORE_Q_FLG_HC_EN,
3960 &init->tx.flags), hc_usec);
3963 /* Rx HC configuration */
3964 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3965 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3966 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3968 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3969 init->rx.sb_cq_index,
3970 !ECORE_TEST_BIT
3971 (ECORE_Q_FLG_HC_EN,
3972 &init->rx.flags), hc_usec);
3975 /* Set CDU context validation values */
3976 for (cos = 0; cos < o->max_cos; cos++) {
3977 ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
3979 ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3980 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3983 /* As no ramrod is sent, complete the command immediately */
3984 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3989 return ECORE_SUCCESS;
3992 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3993 *params)
3995 struct ecore_queue_sp_obj *o = params->q_obj;
3996 struct client_init_ramrod_data *rdata =
3997 (struct client_init_ramrod_data *)o->rdata;
3998 ecore_dma_addr_t data_mapping = o->rdata_mapping;
3999 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4001 /* Clear the ramrod data */
4002 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4004 /* Fill the ramrod data */
4005 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4007 /* No explicit memory barrier is needed here: the ordering of writing
4008 * to the SPQ element and updating the SPQ producer (which involves a
4009 * memory read) is guaranteed by the full memory barrier inside
4010 * ecore_sp_post().
4011 */
4014 return ecore_sp_post(sc,
4015 ramrod,
4016 o->cids[ECORE_PRIMARY_CID_INDEX],
4017 data_mapping, ETH_CONNECTION_TYPE);
4020 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4021 struct ecore_queue_state_params *params)
4023 struct ecore_queue_sp_obj *o = params->q_obj;
4024 struct client_init_ramrod_data *rdata =
4025 (struct client_init_ramrod_data *)o->rdata;
4026 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4027 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4029 /* Clear the ramrod data */
4030 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4032 /* Fill the ramrod data */
4033 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4034 ecore_q_fill_setup_data_e2(params, rdata);
4036 /* No explicit memory barrier is needed here: the ordering of writing
4037 * to the SPQ element and updating the SPQ producer (which involves a
4038 * memory read) is guaranteed by the full memory barrier inside
4039 * ecore_sp_post().
4040 */
4043 return ecore_sp_post(sc,
4044 ramrod,
4045 o->cids[ECORE_PRIMARY_CID_INDEX],
4046 data_mapping, ETH_CONNECTION_TYPE);
4049 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4050 *params)
4052 struct ecore_queue_sp_obj *o = params->q_obj;
4053 struct tx_queue_init_ramrod_data *rdata =
4054 (struct tx_queue_init_ramrod_data *)o->rdata;
4055 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4056 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4057 struct ecore_queue_setup_tx_only_params *tx_only_params =
4058 &params->params.tx_only;
4059 uint8_t cid_index = tx_only_params->cid_index;
4061 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4062 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4063 ECORE_MSG(sc, "sending forward tx-only ramrod");
4064 }
4065 if (cid_index >= o->max_cos) {
4066 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4067 o->cl_id, cid_index);
4068 return ECORE_INVAL;
4069 }
4071 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4072 tx_only_params->gen_params.cos,
4073 tx_only_params->gen_params.spcl_id);
4075 /* Clear the ramrod data */
4076 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4078 /* Fill the ramrod data */
4079 ecore_q_fill_setup_tx_only(sc, params, rdata);
4082 (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4083 o->cids[cid_index], rdata->general.client_id,
4084 rdata->general.sp_client_id, rdata->general.cos);
4086 /* No explicit memory barrier is needed here: the ordering of writing
4087 * to the SPQ element and updating the SPQ producer (which involves a
4088 * memory read) is guaranteed by the full memory barrier inside
4089 * ecore_sp_post().
4090 */
4093 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4094 data_mapping, ETH_CONNECTION_TYPE);
4097 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4098 struct ecore_queue_update_params *params,
4099 struct client_update_ramrod_data *data)
4101 /* Client ID of the client to update */
4102 data->client_id = obj->cl_id;
4104 /* Function ID of the client to update */
4105 data->func_id = obj->func_id;
4107 /* Default VLAN value */
4108 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4110 /* Inner VLAN stripping */
4111 data->inner_vlan_removal_enable_flg =
4112 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4113 data->inner_vlan_removal_change_flg =
4114 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4115 &params->update_flags);
4117 /* Outer VLAN stripping */
4118 data->outer_vlan_removal_enable_flg =
4119 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4120 data->outer_vlan_removal_change_flg =
4121 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4122 &params->update_flags);
4124 /* Drop packets that have source MAC that doesn't belong to this
4125 * Queue.
4126 */
4127 data->anti_spoofing_enable_flg =
4128 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4129 data->anti_spoofing_change_flg =
4130 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4131 &params->update_flags);
4133 /* Activate/Deactivate */
4134 data->activate_flg =
4135 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4136 data->activate_change_flg =
4137 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4139 /* Enable default VLAN */
4140 data->default_vlan_enable_flg =
4141 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4142 data->default_vlan_change_flg =
4143 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4144 &params->update_flags);
4146 /* silent vlan removal */
4147 data->silent_vlan_change_flg =
4148 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4149 &params->update_flags);
4150 data->silent_vlan_removal_flg =
4151 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4152 &params->update_flags);
4153 data->silent_vlan_value =
4154 ECORE_CPU_TO_LE16(params->silent_removal_value);
4155 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4158 data->tx_switching_flg =
4159 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4160 data->tx_switching_change_flg =
4161 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4162 &params->update_flags);
4165 static int ecore_q_send_update(struct bnx2x_softc *sc,
4166 struct ecore_queue_state_params *params)
4168 struct ecore_queue_sp_obj *o = params->q_obj;
4169 struct client_update_ramrod_data *rdata =
4170 (struct client_update_ramrod_data *)o->rdata;
4171 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4172 struct ecore_queue_update_params *update_params =
4173 &params->params.update;
4174 uint8_t cid_index = update_params->cid_index;
4176 if (cid_index >= o->max_cos) {
4177 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4178 o->cl_id, cid_index);
4179 return ECORE_INVAL;
4180 }
4182 /* Clear the ramrod data */
4183 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4185 /* Fill the ramrod data */
4186 ecore_q_fill_update_data(o, update_params, rdata);
4188 /* No explicit memory barrier is needed here: the ordering of writing
4189 * to the SPQ element and updating the SPQ producer (which involves a
4190 * memory read) is guaranteed by the full memory barrier inside
4191 * ecore_sp_post().
4192 */
4195 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4196 o->cids[cid_index], data_mapping,
4197 ETH_CONNECTION_TYPE);
4201 * ecore_q_send_deactivate - send DEACTIVATE command
4203 * @sc: device handle
4206 * implemented using the UPDATE command.
4208 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4209 *params)
4211 struct ecore_queue_update_params *update = &params->params.update;
4213 ECORE_MEMSET(update, 0, sizeof(*update));
4215 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4217 return ecore_q_send_update(sc, params);
4221 * ecore_q_send_activate - send ACTIVATE command
4223 * @sc: device handle
4226 * implemented using the UPDATE command.
4228 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4229 struct ecore_queue_state_params *params)
4231 struct ecore_queue_update_params *update = &params->params.update;
4233 ECORE_MEMSET(update, 0, sizeof(*update));
4235 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4236 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4238 return ecore_q_send_update(sc, params);
4241 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4242 __rte_unused struct
4243 ecore_queue_state_params *params)
4245 /* Not implemented yet. */
4246 return -1;
4249 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4250 struct ecore_queue_state_params *params)
4251 {
4252 struct ecore_queue_sp_obj *o = params->q_obj;
4254 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4255 ecore_dma_addr_t data_mapping = 0;
4256 data_mapping = (ecore_dma_addr_t) o->cl_id;
4258 return ecore_sp_post(sc,
4259 RAMROD_CMD_ID_ETH_HALT,
4260 o->cids[ECORE_PRIMARY_CID_INDEX],
4261 data_mapping, ETH_CONNECTION_TYPE);
4262 }
4264 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4265 struct ecore_queue_state_params *params)
4266 {
4267 struct ecore_queue_sp_obj *o = params->q_obj;
4268 uint8_t cid_idx = params->params.cfc_del.cid_index;
4270 if (cid_idx >= o->max_cos) {
4271 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4272 o->cl_id, cid_idx);
4273 return ECORE_INVAL;
4274 }
4276 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4277 o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4278 }
4280 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4281 *params)
4282 {
4283 struct ecore_queue_sp_obj *o = params->q_obj;
4284 uint8_t cid_index = params->params.terminate.cid_index;
4286 if (cid_index >= o->max_cos) {
4287 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4288 o->cl_id, cid_index);
4289 return ECORE_INVAL;
4290 }
4292 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4293 o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4294 }
4296 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4297 struct ecore_queue_state_params *params)
4298 {
4299 struct ecore_queue_sp_obj *o = params->q_obj;
4301 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4302 o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4303 ETH_CONNECTION_TYPE);
4304 }
4306 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4307 *params)
4308 {
4309 switch (params->cmd) {
4310 case ECORE_Q_CMD_INIT:
4311 return ecore_q_init(sc, params);
4312 case ECORE_Q_CMD_SETUP_TX_ONLY:
4313 return ecore_q_send_setup_tx_only(sc, params);
4314 case ECORE_Q_CMD_DEACTIVATE:
4315 return ecore_q_send_deactivate(sc, params);
4316 case ECORE_Q_CMD_ACTIVATE:
4317 return ecore_q_send_activate(sc, params);
4318 case ECORE_Q_CMD_UPDATE:
4319 return ecore_q_send_update(sc, params);
4320 case ECORE_Q_CMD_UPDATE_TPA:
4321 return ecore_q_send_update_tpa(sc, params);
4322 case ECORE_Q_CMD_HALT:
4323 return ecore_q_send_halt(sc, params);
4324 case ECORE_Q_CMD_CFC_DEL:
4325 return ecore_q_send_cfc_del(sc, params);
4326 case ECORE_Q_CMD_TERMINATE:
4327 return ecore_q_send_terminate(sc, params);
4328 case ECORE_Q_CMD_EMPTY:
4329 return ecore_q_send_empty(sc, params);
4330 default:
4331 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4332 return ECORE_INVAL;
4333 }
4334 }
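/*
 * Editorial note: the chip specific dispatchers below (e1x/e2) handle
 * only ECORE_Q_CMD_SETUP themselves and forward every other command to
 * ecore_queue_send_cmd_cmn() above. Callers never pick a variant by
 * hand; they go through the pointer installed by ecore_init_queue_obj(),
 * e.g. (illustrative):
 *
 *	rc = o->send_cmd(sc, params);	(e1x or e2 variant, set at init)
 */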
4336 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4337 struct ecore_queue_state_params *params)
4338 {
4339 switch (params->cmd) {
4340 case ECORE_Q_CMD_SETUP:
4341 return ecore_q_send_setup_e1x(sc, params);
4342 case ECORE_Q_CMD_INIT:
4343 case ECORE_Q_CMD_SETUP_TX_ONLY:
4344 case ECORE_Q_CMD_DEACTIVATE:
4345 case ECORE_Q_CMD_ACTIVATE:
4346 case ECORE_Q_CMD_UPDATE:
4347 case ECORE_Q_CMD_UPDATE_TPA:
4348 case ECORE_Q_CMD_HALT:
4349 case ECORE_Q_CMD_CFC_DEL:
4350 case ECORE_Q_CMD_TERMINATE:
4351 case ECORE_Q_CMD_EMPTY:
4352 return ecore_queue_send_cmd_cmn(sc, params);
4353 default:
4354 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4355 return ECORE_INVAL;
4356 }
4357 }
4359 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4360 struct ecore_queue_state_params *params)
4361 {
4362 switch (params->cmd) {
4363 case ECORE_Q_CMD_SETUP:
4364 return ecore_q_send_setup_e2(sc, params);
4365 case ECORE_Q_CMD_INIT:
4366 case ECORE_Q_CMD_SETUP_TX_ONLY:
4367 case ECORE_Q_CMD_DEACTIVATE:
4368 case ECORE_Q_CMD_ACTIVATE:
4369 case ECORE_Q_CMD_UPDATE:
4370 case ECORE_Q_CMD_UPDATE_TPA:
4371 case ECORE_Q_CMD_HALT:
4372 case ECORE_Q_CMD_CFC_DEL:
4373 case ECORE_Q_CMD_TERMINATE:
4374 case ECORE_Q_CMD_EMPTY:
4375 return ecore_queue_send_cmd_cmn(sc, params);
4376 default:
4377 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4378 return ECORE_INVAL;
4379 }
4380 }
4382 /**
4383 * ecore_queue_chk_transition - check state machine of a regular Queue
4385 * @sc: device handle
4386 * @o: queue state object
4387 * @params: queue state parameters
4390 * It both checks if the requested command is legal in a current
4391 * state and, if it's legal, sets a `next_state' in the object
4392 * that will be used in the completion flow to set the `state'
4393 * of the object.
4395 * returns 0 if a requested command is a legal transition,
4396 * ECORE_INVAL otherwise.
4397 */
4398 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4399 struct ecore_queue_sp_obj *o,
4400 struct ecore_queue_state_params *params)
4401 {
4402 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4403 enum ecore_queue_cmd cmd = params->cmd;
4404 struct ecore_queue_update_params *update_params =
4405 &params->params.update;
4406 uint8_t next_tx_only = o->num_tx_only;
4408 /* Forget all pending for completion commands if a driver only state
4409 * transition has been requested.
4410 */
4411 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4412 o->pending = 0;
4413 o->next_state = ECORE_Q_STATE_MAX;
4414 }
4416 /* Don't allow a next state transition if we are in the middle of
4417 * the previous one.
4418 */
4419 if (o->pending) {
4420 PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
4421 o->pending);
4422 return ECORE_BUSY;
4423 }
4425 switch (state) {
4426 case ECORE_Q_STATE_RESET:
4427 if (cmd == ECORE_Q_CMD_INIT)
4428 next_state = ECORE_Q_STATE_INITIALIZED;
4430 break;
4431 case ECORE_Q_STATE_INITIALIZED:
4432 if (cmd == ECORE_Q_CMD_SETUP) {
4433 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4434 &params->params.setup.flags))
4435 next_state = ECORE_Q_STATE_ACTIVE;
4436 else
4437 next_state = ECORE_Q_STATE_INACTIVE;
4438 }
4440 break;
4441 case ECORE_Q_STATE_ACTIVE:
4442 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4443 next_state = ECORE_Q_STATE_INACTIVE;
4445 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4446 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4447 next_state = ECORE_Q_STATE_ACTIVE;
4449 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4450 next_state = ECORE_Q_STATE_MULTI_COS;
4451 next_tx_only = 1;
4452 }
4454 else if (cmd == ECORE_Q_CMD_HALT)
4455 next_state = ECORE_Q_STATE_STOPPED;
4457 else if (cmd == ECORE_Q_CMD_UPDATE) {
4458 /* If "active" state change is requested, update the
4459 * state accordingly.
4461 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4462 &update_params->update_flags) &&
4463 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4464 &update_params->update_flags))
4465 next_state = ECORE_Q_STATE_INACTIVE;
4466 else
4467 next_state = ECORE_Q_STATE_ACTIVE;
4468 }
4470 break;
4471 case ECORE_Q_STATE_MULTI_COS:
4472 if (cmd == ECORE_Q_CMD_TERMINATE)
4473 next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4475 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4476 next_state = ECORE_Q_STATE_MULTI_COS;
4477 next_tx_only = o->num_tx_only + 1;
4478 }
4480 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4481 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4482 next_state = ECORE_Q_STATE_MULTI_COS;
4484 else if (cmd == ECORE_Q_CMD_UPDATE) {
4485 /* If "active" state change is requested, update the
4486 * state accordingly.
4488 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4489 &update_params->update_flags) &&
4490 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4491 &update_params->update_flags))
4492 next_state = ECORE_Q_STATE_INACTIVE;
4493 else
4494 next_state = ECORE_Q_STATE_MULTI_COS;
4495 }
4497 break;
4498 case ECORE_Q_STATE_MCOS_TERMINATED:
4499 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4500 next_tx_only = o->num_tx_only - 1;
4501 if (next_tx_only == 0)
4502 next_state = ECORE_Q_STATE_ACTIVE;
4503 else
4504 next_state = ECORE_Q_STATE_MULTI_COS;
4505 }
4507 break;
4508 case ECORE_Q_STATE_INACTIVE:
4509 if (cmd == ECORE_Q_CMD_ACTIVATE)
4510 next_state = ECORE_Q_STATE_ACTIVE;
4512 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4513 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4514 next_state = ECORE_Q_STATE_INACTIVE;
4516 else if (cmd == ECORE_Q_CMD_HALT)
4517 next_state = ECORE_Q_STATE_STOPPED;
4519 else if (cmd == ECORE_Q_CMD_UPDATE) {
4520 /* If "active" state change is requested, update the
4521 * state accordingly.
4523 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4524 &update_params->update_flags) &&
4525 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4526 &update_params->update_flags)) {
4527 if (o->num_tx_only == 0)
4528 next_state = ECORE_Q_STATE_ACTIVE;
4529 else /* tx only queues exist for this queue */
4530 next_state = ECORE_Q_STATE_MULTI_COS;
4531 } else
4532 next_state = ECORE_Q_STATE_INACTIVE;
4533 }
4535 break;
4536 case ECORE_Q_STATE_STOPPED:
4537 if (cmd == ECORE_Q_CMD_TERMINATE)
4538 next_state = ECORE_Q_STATE_TERMINATED;
4540 break;
4541 case ECORE_Q_STATE_TERMINATED:
4542 if (cmd == ECORE_Q_CMD_CFC_DEL)
4543 next_state = ECORE_Q_STATE_RESET;
4545 break;
4546 default:
4547 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4548 }
4550 /* Transition is assured */
4551 if (next_state != ECORE_Q_STATE_MAX) {
4552 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4553 state, cmd, next_state);
4554 o->next_state = next_state;
4555 o->next_tx_only = next_tx_only;
4556 return ECORE_SUCCESS;
4557 }
4559 ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4561 return ECORE_INVAL;
4562 }
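/*
 * Editorial summary of the regular queue state machine checked above:
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *	ACTIVE/MULTI_COS --SETUP_TX_ONLY--> MULTI_COS
 *	MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL-->
 *	                                  ACTIVE or MULTI_COS
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 *
 * EMPTY and UPDATE_TPA keep the current state; UPDATE keeps it as well
 * unless the ACTIVATE/ACTIVATE_CHNG pair toggles ACTIVE/INACTIVE.
 */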
4564 /**
4565 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4567 * @sc: device handle
4568 * @o: queue state object
4569 * @params: queue state parameters
4571 * It both checks if the requested command is legal in a current
4572 * state and, if it's legal, sets a `next_state' in the object
4573 * that will be used in the completion flow to set the `state'
4574 * of the object.
4576 * returns 0 if a requested command is a legal transition,
4577 * ECORE_INVAL otherwise.
4578 */
4579 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4580 struct ecore_queue_sp_obj *o,
4581 struct ecore_queue_state_params
4582 *params)
4583 {
4584 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4585 enum ecore_queue_cmd cmd = params->cmd;
4587 switch (state) {
4588 case ECORE_Q_STATE_RESET:
4589 if (cmd == ECORE_Q_CMD_INIT)
4590 next_state = ECORE_Q_STATE_INITIALIZED;
4592 break;
4593 case ECORE_Q_STATE_INITIALIZED:
4594 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4595 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4596 &params->params.tx_only.flags))
4597 next_state = ECORE_Q_STATE_ACTIVE;
4598 else
4599 next_state = ECORE_Q_STATE_INACTIVE;
4600 }
4602 break;
4603 case ECORE_Q_STATE_ACTIVE:
4604 case ECORE_Q_STATE_INACTIVE:
4605 if (cmd == ECORE_Q_CMD_CFC_DEL)
4606 next_state = ECORE_Q_STATE_RESET;
4608 break;
4609 default:
4610 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4611 }
4613 /* Transition is assured */
4614 if (next_state != ECORE_Q_STATE_MAX) {
4615 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4616 state, cmd, next_state);
4617 o->next_state = next_state;
4618 return ECORE_SUCCESS;
4619 }
4621 ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4623 return ECORE_INVAL;
4624 }
4625 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4626 struct ecore_queue_sp_obj *obj,
4627 uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4628 uint8_t func_id, void *rdata,
4629 ecore_dma_addr_t rdata_mapping, unsigned long type)
4630 {
4631 ECORE_MEMSET(obj, 0, sizeof(*obj));
4633 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4634 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4636 rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4637 obj->max_cos = cid_cnt;
4638 obj->cl_id = cl_id;
4639 obj->func_id = func_id;
4640 obj->rdata = rdata;
4641 obj->rdata_mapping = rdata_mapping;
4642 obj->type = type;
4643 obj->next_state = ECORE_Q_STATE_MAX;
4645 if (CHIP_IS_E1x(sc))
4646 obj->send_cmd = ecore_queue_send_cmd_e1x;
4647 else
4648 obj->send_cmd = ecore_queue_send_cmd_e2;
4650 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4651 obj->check_transition = ecore_queue_chk_fwd_transition;
4652 else
4653 obj->check_transition = ecore_queue_chk_transition;
4655 obj->complete_cmd = ecore_queue_comp_cmd;
4656 obj->wait_comp = ecore_queue_wait_comp;
4657 obj->set_pending = ecore_queue_set_pending;
4658 }
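/*
 * Illustrative initialization sketch (not driver code; `fp`, the
 * DMA-able `rdata` buffer and its mapping are assumptions):
 *
 *	uint32_t cids[1] = { fp->cid };
 *	unsigned long q_type = 0;
 *
 *	ECORE_SET_BIT_NA(ECORE_Q_TYPE_HAS_RX, &q_type);
 *	ECORE_SET_BIT_NA(ECORE_Q_TYPE_HAS_TX, &q_type);
 *	ecore_init_queue_obj(sc, &fp->q_obj, fp->cl_id, cids, 1,
 *			     SC_FUNC(sc), rdata, rdata_mapping, q_type);
 */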
4660 /********************** Function state object *********************************/
4661 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4662 struct ecore_func_sp_obj *o)
4663 {
4664 /* in the middle of transaction - return INVALID state */
4665 if (o->pending)
4666 return ECORE_F_STATE_MAX;
4668 /* ensure the order of reading of o->pending and o->state:
4669 * o->pending should be read first
4670 */
4671 rmb();
4672 return o->state;
4673 }
4676 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4677 struct ecore_func_sp_obj *o,
4678 enum ecore_func_cmd cmd)
4679 {
4680 return ecore_state_wait(sc, cmd, &o->pending);
4681 }
4683 /**
4684 * ecore_func_state_change_comp - complete the state machine transition
4686 * @sc: device handle
4687 * @o: function state object
4688 * @cmd: command that has completed
4690 * Called on state change transition. Completes the state
4691 * machine transition only - no HW interaction.
4692 */
4693 static int
4694 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4695 struct ecore_func_sp_obj *o,
4696 enum ecore_func_cmd cmd)
4697 {
4698 unsigned long cur_pending = o->pending;
4700 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4701 PMD_DRV_LOG(ERR, sc,
4702 "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4703 cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4704 o->next_state);
4706 return ECORE_INVAL;
4707 }
4708 ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
4709 cmd, ECORE_FUNC_ID(sc), o->next_state);
4711 o->state = o->next_state;
4712 o->next_state = ECORE_F_STATE_MAX;
4714 /* It's important that o->state and o->next_state are
4715 * updated before o->pending.
4716 */
4717 wmb();
4719 ECORE_CLEAR_BIT(cmd, &o->pending);
4720 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4722 return ECORE_SUCCESS;
4723 }
4725 /**
4726 * ecore_func_comp_cmd - complete the state change command
4728 * @sc: device handle
4729 * @o: function state object
4730 * @cmd: command that has completed
4732 * Checks that the arrived completion is expected.
4733 */
4734 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4735 struct ecore_func_sp_obj *o,
4736 enum ecore_func_cmd cmd)
4737 {
4738 /* Complete the state machine part first, check if it's a
4739 * legal completion.
4740 */
4741 int rc = ecore_func_state_change_comp(sc, o, cmd);
4743 return rc;
4744 }
4745 /**
4746 * ecore_func_chk_transition - perform function state machine transition
4748 * @sc: device handle
4749 * @o: function state object
4750 * @params: state parameters
4752 * It both checks if the requested command is legal in a current
4753 * state and, if it's legal, sets a `next_state' in the object
4754 * that will be used in the completion flow to set the `state'
4755 * of the object.
4757 * returns 0 if a requested command is a legal transition,
4758 * ECORE_INVAL otherwise.
4759 */
4760 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4761 struct ecore_func_sp_obj *o,
4762 struct ecore_func_state_params *params)
4763 {
4764 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4765 enum ecore_func_cmd cmd = params->cmd;
4767 /* Forget all pending for completion commands if a driver only state
4768 * transition has been requested.
4769 */
4770 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4771 o->pending = 0;
4772 o->next_state = ECORE_F_STATE_MAX;
4773 }
4775 /* Don't allow a next state transition if we are in the middle of
4776 * the previous one.
4777 */
4778 if (o->pending)
4779 return ECORE_BUSY;
4781 switch (state) {
4782 case ECORE_F_STATE_RESET:
4783 if (cmd == ECORE_F_CMD_HW_INIT)
4784 next_state = ECORE_F_STATE_INITIALIZED;
4786 break;
4787 case ECORE_F_STATE_INITIALIZED:
4788 if (cmd == ECORE_F_CMD_START)
4789 next_state = ECORE_F_STATE_STARTED;
4791 else if (cmd == ECORE_F_CMD_HW_RESET)
4792 next_state = ECORE_F_STATE_RESET;
4794 break;
4795 case ECORE_F_STATE_STARTED:
4796 if (cmd == ECORE_F_CMD_STOP)
4797 next_state = ECORE_F_STATE_INITIALIZED;
4798 /* afex ramrods can be sent only in started mode, and only
4799 * if not pending for function_stop ramrod completion;
4800 * for these events the next state remains STARTED.
4801 */
4802 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4803 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4804 next_state = ECORE_F_STATE_STARTED;
4806 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4807 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4808 next_state = ECORE_F_STATE_STARTED;
4810 /* Switch_update ramrod can be sent in either started or
4811 * tx_stopped state, and it doesn't change the state.
4813 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4814 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4815 next_state = ECORE_F_STATE_STARTED;
4817 else if (cmd == ECORE_F_CMD_TX_STOP)
4818 next_state = ECORE_F_STATE_TX_STOPPED;
4820 break;
4821 case ECORE_F_STATE_TX_STOPPED:
4822 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4823 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4824 next_state = ECORE_F_STATE_TX_STOPPED;
4826 else if (cmd == ECORE_F_CMD_TX_START)
4827 next_state = ECORE_F_STATE_STARTED;
4829 break;
4830 default:
4831 PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
4832 }
4834 /* Transition is assured */
4835 if (next_state != ECORE_F_STATE_MAX) {
4836 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
4837 state, cmd, next_state);
4838 o->next_state = next_state;
4839 return ECORE_SUCCESS;
4843 "Bad function state transition request: %d %d", state, cmd);
4848 /**
4849 * ecore_func_init_func - performs HW init at function stage
4851 * @sc: device handle
4852 * @drv: driver specific operations
4854 * Init HW when the current phase is
4855 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4856 * HW blocks.
4857 */
4858 static int ecore_func_init_func(struct bnx2x_softc *sc,
4859 const struct ecore_func_sp_drv_ops *drv)
4860 {
4861 return drv->init_hw_func(sc);
4862 }
4864 /**
4865 * ecore_func_init_port - performs HW init at port stage
4867 * @sc: device handle
4868 * @drv: driver specific operations
4870 * Init HW when the current phase is
4871 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4872 * FUNCTION-only HW blocks.
4873 */
4875 static int ecore_func_init_port(struct bnx2x_softc *sc,
4876 const struct ecore_func_sp_drv_ops *drv)
4877 {
4878 int rc = drv->init_hw_port(sc);
4879 if (rc)
4880 return rc;
4882 return ecore_func_init_func(sc, drv);
4883 }
4885 /**
4886 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4888 * @sc: device handle
4889 * @drv: driver specific operations
4891 * Init HW when the current phase is
4892 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4893 * PORT-only and FUNCTION-only HW blocks.
4894 */
4895 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4896 *drv)
4897 {
4898 int rc = drv->init_hw_cmn_chip(sc);
4899 if (rc)
4900 return rc;
4902 return ecore_func_init_port(sc, drv);
4903 }
4905 /**
4906 * ecore_func_init_cmn - performs HW init at common stage
4908 * @sc: device handle
4909 * @drv: driver specific operations
4911 * Init HW when the current phase is
4912 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4913 * PORT-only and FUNCTION-only HW blocks.
4914 */
4915 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4916 const struct ecore_func_sp_drv_ops *drv)
4917 {
4918 int rc = drv->init_hw_cmn(sc);
4919 if (rc)
4920 return rc;
4922 return ecore_func_init_port(sc, drv);
4923 }
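/*
 * Editorial note: the init helpers above nest deliberately - each load
 * phase initializes its own blocks and then falls through to the
 * narrower phase. Illustrative call chain for the widest phase:
 *
 *	ecore_func_init_cmn_chip()
 *	  -> drv->init_hw_cmn_chip()
 *	  -> ecore_func_init_port()
 *	       -> drv->init_hw_port()
 *	       -> ecore_func_init_func()
 *	            -> drv->init_hw_func()
 */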
4925 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4926 struct ecore_func_state_params *params)
4927 {
4928 uint32_t load_code = params->params.hw_init.load_phase;
4929 struct ecore_func_sp_obj *o = params->f_obj;
4930 const struct ecore_func_sp_drv_ops *drv = o->drv;
4931 int rc = 0;
4933 ECORE_MSG(sc, "function %d load_code %x",
4934 ECORE_ABS_FUNC_ID(sc), load_code);
4936 /* Prepare FW */
4937 rc = drv->init_fw(sc);
4938 if (rc) {
4939 PMD_DRV_LOG(ERR, sc, "Error loading firmware");
4940 return rc;
4941 }
4943 /* Handle the beginning of COMMON_XXX phases separately... */
4944 switch (load_code) {
4945 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4946 rc = ecore_func_init_cmn_chip(sc, drv);
4950 break;
4951 case FW_MSG_CODE_DRV_LOAD_COMMON:
4952 rc = ecore_func_init_cmn(sc, drv);
4956 break;
4957 case FW_MSG_CODE_DRV_LOAD_PORT:
4958 rc = ecore_func_init_port(sc, drv);
4962 break;
4963 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4964 rc = ecore_func_init_func(sc, drv);
4968 break;
4969 default:
4970 PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
4971 load_code);
4972 rc = ECORE_INVAL;
4973 }
4976 /* In case of success, complete the command immediately: no ramrods
4977 * have been sent.
4978 */
4979 if (rc == ECORE_SUCCESS)
4980 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4982 return rc;
4983 }
4985 /**
4986 * ecore_func_reset_func - reset HW at function stage
4988 * @sc: device handle
4989 * @drv: driver specific operations
4991 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4992 * FUNCTION-only HW blocks.
4993 */
4994 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4995 *drv)
4996 {
4997 drv->reset_hw_func(sc);
4998 }
5000 /**
5001 * ecore_func_reset_port - reset HW at port stage
5003 * @sc: device handle
5004 * @drv: driver specific operations
5006 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5007 * FUNCTION-only and PORT-only HW blocks.
5011 * It's important to call reset_port before reset_func() as the last thing
5012 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5013 * makes any DMAE transactions impossible.
5014 */
5015 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5016 *drv)
5017 {
5018 drv->reset_hw_port(sc);
5019 ecore_func_reset_func(sc, drv);
5020 }
5022 /**
5023 * ecore_func_reset_cmn - reset HW at common stage
5025 * @sc: device handle
5026 * @drv: driver specific operations
5028 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5029 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5030 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5031 */
5032 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5033 const struct ecore_func_sp_drv_ops *drv)
5034 {
5035 ecore_func_reset_port(sc, drv);
5036 drv->reset_hw_cmn(sc);
5037 }
5039 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5040 struct ecore_func_state_params *params)
5041 {
5042 uint32_t reset_phase = params->params.hw_reset.reset_phase;
5043 struct ecore_func_sp_obj *o = params->f_obj;
5044 const struct ecore_func_sp_drv_ops *drv = o->drv;
5046 ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5047 reset_phase);
5049 switch (reset_phase) {
5050 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5051 ecore_func_reset_cmn(sc, drv);
5052 break;
5053 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5054 ecore_func_reset_port(sc, drv);
5055 break;
5056 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5057 ecore_func_reset_func(sc, drv);
5058 break;
5059 default:
5060 PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
5061 reset_phase);
5062 }
5065 /* Complete the command immediately: no ramrods have been sent. */
5066 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5068 return ECORE_SUCCESS;
5069 }
5071 static int ecore_func_send_start(struct bnx2x_softc *sc,
5072 struct ecore_func_state_params *params)
5073 {
5074 struct ecore_func_sp_obj *o = params->f_obj;
5075 struct function_start_data *rdata =
5076 (struct function_start_data *)o->rdata;
5077 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5078 struct ecore_func_start_params *start_params = &params->params.start;
5080 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5082 /* Fill the ramrod data with provided parameters */
5083 rdata->function_mode = (uint8_t) start_params->mf_mode;
5084 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5085 rdata->path_id = ECORE_PATH_ID(sc);
5086 rdata->network_cos_mode = start_params->network_cos_mode;
5087 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5088 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5090 /*
5091 * No need for an explicit memory barrier here: ecore_sp_post()
5092 * issues a full memory barrier before updating the SPQ producer,
5093 * which orders the write of the SPQ element data against the
5094 * producer update that makes it visible to the firmware.
5095 */
5098 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5099 data_mapping, NONE_CONNECTION_TYPE);
5100 }
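/*
 * Illustrative sketch of driving FUNCTION_START (not driver code;
 * `sc->func_obj` and the zero mf_mode value are assumptions, and the
 * concrete parameters depend on the multi-function mode in use):
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = &sc->func_obj;
 *	func_params.cmd = ECORE_F_CMD_START;
 *	func_params.params.start.mf_mode = 0;
 *	func_params.params.start.sd_vlan_tag = 0;
 *	func_params.params.start.network_cos_mode = 0;
 *	rc = ecore_func_state_change(sc, &func_params);
 */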
5102 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5103 *params)
5104 {
5105 struct ecore_func_sp_obj *o = params->f_obj;
5106 struct function_update_data *rdata =
5107 (struct function_update_data *)o->rdata;
5108 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5109 struct ecore_func_switch_update_params *switch_update_params =
5110 &params->params.switch_update;
5112 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5114 /* Fill the ramrod data with provided parameters */
5115 rdata->tx_switch_suspend_change_flg = 1;
5116 rdata->tx_switch_suspend = switch_update_params->suspend;
5117 rdata->echo = SWITCH_UPDATE;
5119 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5120 data_mapping, NONE_CONNECTION_TYPE);
5121 }
5123 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5124 *params)
5125 {
5126 struct ecore_func_sp_obj *o = params->f_obj;
5127 struct function_update_data *rdata =
5128 (struct function_update_data *)o->afex_rdata;
5129 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5130 struct ecore_func_afex_update_params *afex_update_params =
5131 &params->params.afex_update;
5133 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5135 /* Fill the ramrod data with provided parameters */
5136 rdata->vif_id_change_flg = 1;
5137 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5138 rdata->afex_default_vlan_change_flg = 1;
5139 rdata->afex_default_vlan =
5140 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5141 rdata->allowed_priorities_change_flg = 1;
5142 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5143 rdata->echo = AFEX_UPDATE;
5145 /* No need for an explicit memory barrier here: ecore_sp_post()
5146 * issues a full memory barrier before updating the SPQ producer,
5147 * which orders the write of the SPQ element data against the
5148 * producer update that makes it visible to the firmware.
5149 */
5151 ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5152 rdata->vif_id,
5153 rdata->afex_default_vlan, rdata->allowed_priorities);
5155 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5156 data_mapping, NONE_CONNECTION_TYPE);
5157 }
5159 static
5160 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5161 struct ecore_func_state_params *params)
5162 {
5163 struct ecore_func_sp_obj *o = params->f_obj;
5164 struct afex_vif_list_ramrod_data *rdata =
5165 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5166 struct ecore_func_afex_viflists_params *afex_vif_params =
5167 &params->params.afex_viflists;
5168 uint64_t *p_rdata = (uint64_t *) rdata;
5170 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5172 /* Fill the ramrod data with provided parameters */
5173 rdata->vif_list_index =
5174 ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5175 rdata->func_bit_map = afex_vif_params->func_bit_map;
5176 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5177 rdata->func_to_clear = afex_vif_params->func_to_clear;
5179 /* send in echo type of sub command */
5180 rdata->echo = afex_vif_params->afex_vif_list_command;
5182 /* No need for an explicit memory barrier here: ecore_sp_post()
5183 * issues a full memory barrier before updating the SPQ producer,
5184 * which orders the write of the SPQ element data against the
5185 * producer update that makes it visible to the firmware.
5186 */
5189 ECORE_MSG
5190 (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5191 rdata->afex_vif_list_command, rdata->vif_list_index,
5192 rdata->func_bit_map, rdata->func_to_clear);
5194 /* this ramrod sends data directly and not through DMA mapping */
5195 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5196 *p_rdata, NONE_CONNECTION_TYPE);
5197 }
5199 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
5200 ecore_func_state_params *params)
5201 {
5202 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5203 NONE_CONNECTION_TYPE);
5204 }
5206 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
5207 ecore_func_state_params *params)
5208 {
5209 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5210 NONE_CONNECTION_TYPE);
5211 }
5213 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
5214 *params)
5215 {
5216 struct ecore_func_sp_obj *o = params->f_obj;
5217 struct flow_control_configuration *rdata =
5218 (struct flow_control_configuration *)o->rdata;
5219 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5220 struct ecore_func_tx_start_params *tx_start_params =
5221 &params->params.tx_start;
5222 int i;
5224 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5226 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5227 rdata->dcb_version = tx_start_params->dcb_version;
5228 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5230 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5231 rdata->traffic_type_to_priority_cos[i] =
5232 tx_start_params->traffic_type_to_priority_cos[i];
5234 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5235 data_mapping, NONE_CONNECTION_TYPE);
5236 }
5238 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5239 struct ecore_func_state_params *params)
5240 {
5241 switch (params->cmd) {
5242 case ECORE_F_CMD_HW_INIT:
5243 return ecore_func_hw_init(sc, params);
5244 case ECORE_F_CMD_START:
5245 return ecore_func_send_start(sc, params);
5246 case ECORE_F_CMD_STOP:
5247 return ecore_func_send_stop(sc, params);
5248 case ECORE_F_CMD_HW_RESET:
5249 return ecore_func_hw_reset(sc, params);
5250 case ECORE_F_CMD_AFEX_UPDATE:
5251 return ecore_func_send_afex_update(sc, params);
5252 case ECORE_F_CMD_AFEX_VIFLISTS:
5253 return ecore_func_send_afex_viflists(sc, params);
5254 case ECORE_F_CMD_TX_STOP:
5255 return ecore_func_send_tx_stop(sc, params);
5256 case ECORE_F_CMD_TX_START:
5257 return ecore_func_send_tx_start(sc, params);
5258 case ECORE_F_CMD_SWITCH_UPDATE:
5259 return ecore_func_send_switch_update(sc, params);
5260 default:
5261 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
5262 return ECORE_INVAL;
5263 }
5264 }
5266 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5267 struct ecore_func_sp_obj *obj,
5268 void *rdata, ecore_dma_addr_t rdata_mapping,
5269 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5270 struct ecore_func_sp_drv_ops *drv_iface)
5271 {
5272 ECORE_MEMSET(obj, 0, sizeof(*obj));
5274 ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5276 obj->rdata = rdata;
5277 obj->rdata_mapping = rdata_mapping;
5278 obj->afex_rdata = afex_rdata;
5279 obj->afex_rdata_mapping = afex_rdata_mapping;
5280 obj->send_cmd = ecore_func_send_cmd;
5281 obj->check_transition = ecore_func_chk_transition;
5282 obj->complete_cmd = ecore_func_comp_cmd;
5283 obj->wait_comp = ecore_func_wait_comp;
5284 obj->drv = drv_iface;
5285 }
5287 /**
5288 * ecore_func_state_change - perform Function state change transition
5290 * @sc: device handle
5291 * @params: parameters to perform the transaction
5293 * returns 0 in case of successfully completed transition,
5294 * negative error code in case of failure, positive
5295 * (EBUSY) value if there is a completion that is
5296 * still pending (possible only if RAMROD_COMP_WAIT is
5297 * not set in params->ramrod_flags for asynchronous
5298 * commands).
5299 */
5300 int ecore_func_state_change(struct bnx2x_softc *sc,
5301 struct ecore_func_state_params *params)
5302 {
5303 struct ecore_func_sp_obj *o = params->f_obj;
5304 int rc, cnt = 300;
5305 enum ecore_func_cmd cmd = params->cmd;
5306 unsigned long *pending = &o->pending;
5308 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5310 /* Check that the requested transition is legal */
5311 rc = o->check_transition(sc, o, params);
5312 if ((rc == ECORE_BUSY) &&
5313 (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5314 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5315 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5316 ECORE_MSLEEP(10);
5317 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5318 rc = o->check_transition(sc, o, params);
5319 }
5320 if (rc == ECORE_BUSY) {
5321 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5322 PMD_DRV_LOG(ERR, sc,
5323 "timeout waiting for previous ramrod completion");
5324 return rc;
5325 }
5326 } else if (rc) {
5327 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5328 return rc;
5329 }
5331 /* Set "pending" bit */
5332 ECORE_SET_BIT(cmd, pending);
5334 /* Don't send a command if only driver cleanup was requested */
5335 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5336 ecore_func_state_change_comp(sc, o, cmd);
5337 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5338 } else {
5339 /* Send a ramrod */
5340 rc = o->send_cmd(sc, params);
5342 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5344 if (rc) {
5345 o->next_state = ECORE_F_STATE_MAX;
5346 ECORE_CLEAR_BIT(cmd, pending);
5347 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5348 return rc;
5349 }
5350 }
5351 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5352 rc = o->wait_comp(sc, o, cmd);
5353 if (rc)
5354 return rc;
5356 return ECORE_SUCCESS;
5357 }
5360 return ECORE_RET_PENDING(cmd, pending);
5361 }
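/*
 * Illustrative synchronous usage (not driver code): RAMROD_COMP_WAIT
 * blocks until the ramrod completes, while RAMROD_RETRY re-runs the
 * transition check while a previous command is still pending.
 * `sc->func_obj` is an assumption:
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *
 *	func_params.f_obj = &sc->func_obj;
 *	func_params.cmd = ECORE_F_CMD_TX_STOP;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = ecore_func_state_change(sc, &func_params);
 */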
5363 /******************************************************************************
5365 * Calculates crc 8 on a word value: polynomial 0-1-2-8
5366 * Code was translated from Verilog.
5368 *****************************************************************************/
5369 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5370 {
5371 uint8_t D[32];
5372 uint8_t NewCRC[8];
5373 uint8_t C[8];
5374 uint8_t crc_res;
5375 uint8_t i;
5377 /* split the data into 32 bits */
5378 for (i = 0; i < 32; i++) {
5379 D[i] = (uint8_t) (data & 1);
5380 data = data >> 1;
5381 }
5383 /* split the crc into 8 bits */
5384 for (i = 0; i < 8; i++) {
5385 C[i] = crc & 1;
5386 crc = crc >> 1;
5387 }
5389 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5390 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5391 C[6] ^ C[7];
5392 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5393 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5394 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5395 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5396 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5397 C[0] ^ C[1] ^ C[4] ^ C[5];
5398 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5399 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5400 C[1] ^ C[2] ^ C[5] ^ C[6];
5401 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5402 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5403 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5404 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5405 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5406 C[3] ^ C[6] ^ C[7];
5407 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5408 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5409 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5410 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5412 crc_res = 0;
5413 for (i = 0; i < 8; i++) {
5414 crc_res |= (NewCRC[i] << i);
5415 }
5417 return crc_res;
5418 }
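/*
 * Editorial note: "polynomial 0-1-2-8" means taps at x^0, x^1, x^2 and
 * x^8, i.e. the CRC-8 polynomial x^8 + x^2 + x + 1 (0x07), applied
 * LSB-first to one 32-bit word per call. For orientation only, a
 * generic LSB-first (reflected, 0xE0) CRC-8 update has the same shape
 * as ecore_calc_crc32() below; this sketch is not claimed bit-exact to
 * the Verilog translation above:
 *
 *	uint8_t crc8_lsb(uint32_t data, uint8_t crc)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 32; i++, data >>= 1)
 *			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 0xe0 : 0);
 *		return crc;
 *	}
 */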
5420 uint32_t
5421 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5422 {
5423 uint32_t i;
5424 while (len--) {
5425 crc ^= *p++;
5426 for (i = 0; i < 8; i++)
5427 crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5428 }
5429 return crc;
5430 }