1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2007-2013 Broadcom Corporation.
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9 * Copyright (c) 2015-2018 Cavium Inc.
10 * All rights reserved.
15 #include "ecore_init.h"
17 /**** Exe Queue interfaces ****/
20 * ecore_exe_queue_init - init the Exe Queue object
22 * @o: pointer to the object
24 * @owner: pointer to the owner
25 * @validate: validate function pointer
26 * @optimize: optimize function pointer
27 * @exec: execute function pointer
28 * @get: get function pointer
31 ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
32 struct ecore_exe_queue_obj *o,
34 union ecore_qable_obj *owner,
35 exe_q_validate validate,
37 exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
39 ECORE_MEMSET(o, 0, sizeof(*o));
41 ECORE_LIST_INIT(&o->exe_queue);
42 ECORE_LIST_INIT(&o->pending_comp);
44 ECORE_SPIN_LOCK_INIT(&o->lock, sc);
46 o->exe_chunk_len = exe_len;
49 /* Owner specific callbacks */
50 o->validate = validate;
52 o->optimize = optimize;
56 ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
60 static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
61 struct ecore_exeq_elem *elem)
63 ECORE_MSG(sc, "Deleting an exe_queue element");
64 ECORE_FREE(sc, elem, sizeof(*elem));
67 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
69 struct ecore_exeq_elem *elem;
72 ECORE_SPIN_LOCK_BH(&o->lock);
74 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
75 struct ecore_exeq_elem) cnt++;
77 ECORE_SPIN_UNLOCK_BH(&o->lock);
83 * ecore_exe_queue_add - add a new element to the execution queue
87  * @elem:	new command element to add
88 * @restore: true - do not optimize the command
90 * If the element is optimized or is illegal, frees it.
92 static int ecore_exe_queue_add(struct bnx2x_softc *sc,
93 struct ecore_exe_queue_obj *o,
94 struct ecore_exeq_elem *elem, int restore)
98 ECORE_SPIN_LOCK_BH(&o->lock);
101	/* Try to cancel this command by optimizing it away */
102 rc = o->optimize(sc, o->owner, elem);
106 /* Check if this request is ok */
107 rc = o->validate(sc, o->owner, elem);
109 ECORE_MSG(sc, "Preamble failed: %d", rc);
114 /* If so, add it to the execution queue */
115 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
117 ECORE_SPIN_UNLOCK_BH(&o->lock);
119 return ECORE_SUCCESS;
122 ecore_exe_queue_free_elem(sc, elem);
124 ECORE_SPIN_UNLOCK_BH(&o->lock);
129 static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj
132 struct ecore_exeq_elem *elem;
134 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
135 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
136 struct ecore_exeq_elem, link);
138 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
139 ecore_exe_queue_free_elem(sc, elem);
143 static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
144 struct ecore_exe_queue_obj *o)
146 ECORE_SPIN_LOCK_BH(&o->lock);
148 __ecore_exe_queue_reset_pending(sc, o);
150 ECORE_SPIN_UNLOCK_BH(&o->lock);
154 * ecore_exe_queue_step - execute one execution chunk atomically
158 * @ramrod_flags: flags
160 * (Should be called while holding the exe_queue->lock).
162 static int ecore_exe_queue_step(struct bnx2x_softc *sc,
163 struct ecore_exe_queue_obj *o,
164 unsigned long *ramrod_flags)
166 struct ecore_exeq_elem *elem, spacer;
169 ECORE_MEMSET(&spacer, 0, sizeof(spacer));
171 /* Next step should not be performed until the current is finished,
172 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
173 * properly clear object internals without sending any command to the FW
174	 * which also implies there won't be any completion to clear the 'pending' list.
177 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
178 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
180 "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
181 __ecore_exe_queue_reset_pending(sc, o);
183 return ECORE_PENDING;
187	/* Run through the pending commands list and create the next execution chunk. */
190 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
191 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
192 struct ecore_exeq_elem, link);
193 ECORE_DBG_BREAK_IF(!elem->cmd_len);
195 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
196 cur_len += elem->cmd_len;
197			/* Prevent both lists from being empty when moving an
198			 * element. This allows calling
199			 * ecore_exe_queue_empty() without locking.
201 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
203 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
204 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
205 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
212 return ECORE_SUCCESS;
214 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
216 /* In case of an error return the commands back to the queue
217 * and reset the pending_comp.
219 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
221	/* If zero is returned, it means there are no outstanding pending
222 * completions and we may dismiss the pending list.
224 __ecore_exe_queue_reset_pending(sc, o);
229 static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
231 int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
233 /* Don't reorder!!! */
236 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
239 static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
243 ECORE_MSG(sc, "Allocating a new exe_queue element");
244 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
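/* Illustrative sketch (editor's note, not part of the driver flow): a
 * queueable object wires the execution queue once and then feeds it command
 * elements. Assuming `o`, `qable_obj` and the vlan_mac callbacks defined
 * later in this file, the lifecycle roughly looks like:
 *
 *	ecore_exe_queue_init(sc, &o->exe_queue, 1, qable_obj,
 *			     ecore_validate_vlan_mac, ecore_remove_vlan_mac,
 *			     ecore_optimize_vlan_mac, ecore_execute_vlan_mac,
 *			     ecore_exeq_get_mac);
 *	elem = ecore_exe_queue_alloc_elem(sc);
 *	rc = ecore_exe_queue_add(sc, &o->exe_queue, elem, FALSE);
 *
 * ecore_exe_queue_step() then drains up to exe_chunk_len commands per call,
 * and __ecore_exe_queue_reset_pending() is used once completions arrive.
 */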
247 /************************ raw_obj functions ***********************************/
248 static int ecore_raw_check_pending(struct ecore_raw_obj *o)
251 * !! converts the value returned by ECORE_TEST_BIT such that it
252 * is guaranteed not to be truncated regardless of int definition.
254 * Note we cannot simply define the function's return value type
255 * to match the type returned by ECORE_TEST_BIT, as it varies by
256 * platform/implementation.
259	return !!ECORE_TEST_BIT(o->state, o->pstate);
262 static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
264 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
265 ECORE_CLEAR_BIT(o->state, o->pstate);
266 ECORE_SMP_MB_AFTER_CLEAR_BIT();
269 static void ecore_raw_set_pending(struct ecore_raw_obj *o)
271 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
272 ECORE_SET_BIT(o->state, o->pstate);
273 ECORE_SMP_MB_AFTER_CLEAR_BIT();
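/* Illustrative sketch (editor's note, not part of the driver flow): the raw
 * object implements a simple pending-bit protocol around each ramrod,
 * assuming `raw` points to an initialized ecore_raw_obj:
 *
 *	raw->set_pending(raw);
 *	... post the ramrod to the FW ...
 *	rc = raw->wait_comp(sc, raw);
 *
 * The completion path calls raw->clear_pending(raw), which is what makes
 * wait_comp() (i.e. ecore_state_wait()) return before its timeout.
 */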
277 * ecore_state_wait - wait until the given bit(state) is cleared
280 * @state: state which is to be cleared
281 * @state_p: state buffer
284 static int ecore_state_wait(struct bnx2x_softc *sc, int state,
285 unsigned long *pstate)
287 /* can take a while if any port is running */
290 if (CHIP_REV_IS_EMUL(sc))
293 ECORE_MSG(sc, "waiting for state to become %d", state);
297 bnx2x_intr_legacy(sc, 1);
298 if (!ECORE_TEST_BIT(state, pstate)) {
299 #ifdef ECORE_STOP_ON_ERROR
300 ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
302 return ECORE_SUCCESS;
305 ECORE_WAIT(sc, delay_us);
312 PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
313 #ifdef ECORE_STOP_ON_ERROR
317 return ECORE_TIMEOUT;
320 static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
322 return ecore_state_wait(sc, raw->state, raw->pstate);
325 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
326 /* credit handling callbacks */
327 static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
329 struct ecore_credit_pool_obj *mp = o->macs_pool;
331 ECORE_DBG_BREAK_IF(!mp);
333 return mp->get_entry(mp, offset);
336 static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
338 struct ecore_credit_pool_obj *mp = o->macs_pool;
340 ECORE_DBG_BREAK_IF(!mp);
342 return mp->get(mp, 1);
345 static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
347 struct ecore_credit_pool_obj *mp = o->macs_pool;
349 return mp->put_entry(mp, offset);
352 static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
354 struct ecore_credit_pool_obj *mp = o->macs_pool;
356 return mp->put(mp, 1);
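/* Illustrative sketch (editor's note, not part of the driver flow): CAM
 * credits are taken from the MAC pool before an ADD and given back on DEL or
 * on an error path. Assuming `o` is a MAC object and `offset` an int:
 *
 *	if (!ecore_get_credit_mac(o))
 *		return an error;		(no free CAM entries left)
 *	if (!ecore_get_cam_offset_mac(o, &offset))
 *		ecore_put_credit_mac(o);	(roll the credit back)
 *
 * The validate/remove callbacks below do this bookkeeping through the
 * o->get_credit()/o->put_credit() and o->get_cam_offset()/o->put_cam_offset()
 * members that are wired up in ecore_init_mac_obj().
 */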
360 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
364 * @o: vlan_mac object
366  * @details: Non-blocking implementation; should be called under the execution queue lock.
369 static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
370 struct ecore_vlan_mac_obj *o)
372 if (o->head_reader) {
373 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
377 ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
378 return ECORE_SUCCESS;
382 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
383 * which wasn't able to run due to a taken lock on vlan mac head list.
386 * @o: vlan_mac object
388 * @details Should be called under execution queue lock; notice it might release
389 * and reclaim it during its run.
391 static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
392 struct ecore_vlan_mac_obj *o)
395 unsigned long ramrod_flags = o->saved_ramrod_flags;
397 ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
399 o->head_exe_request = FALSE;
400 o->saved_ramrod_flags = 0;
401 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
402 if (rc != ECORE_SUCCESS) {
404 "execution of pending commands failed with rc %d",
406 #ifdef ECORE_STOP_ON_ERROR
413 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
414 * called due to vlan mac head list lock being taken.
417 * @o: vlan_mac object
418 * @ramrod_flags: ramrod flags of missed execution
420 * @details Should be called under execution queue lock.
422 static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
423 struct ecore_vlan_mac_obj *o,
424 unsigned long ramrod_flags)
426 o->head_exe_request = TRUE;
427 o->saved_ramrod_flags = ramrod_flags;
428 ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
433 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
436 * @o: vlan_mac object
438 * @details Should be called under execution queue lock. Notice if a pending
439 * execution exists, it would perform it - possibly releasing and
440 * reclaiming the execution queue lock.
442 static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
443 struct ecore_vlan_mac_obj *o)
445 /* It's possible a new pending execution was added since this writer
446 * executed. If so, execute again. [Ad infinitum]
448 while (o->head_exe_request) {
450 "vlan_mac_lock - writer release encountered a pending request");
451 __ecore_vlan_mac_h_exec_pending(sc, o);
456 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
459 * @o: vlan_mac object
461 * @details Notice if a pending execution exists, it would perform it -
462 * possibly releasing and reclaiming the execution queue lock.
464 void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
465 struct ecore_vlan_mac_obj *o)
467 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
468 __ecore_vlan_mac_h_write_unlock(sc, o);
469 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
473 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
476 * @o: vlan_mac object
478 * @details Should be called under the execution queue lock. May sleep. May
479 * release and reclaim execution queue lock during its run.
481 static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
482 struct ecore_vlan_mac_obj *o)
484 /* If we got here, we're holding lock --> no WRITER exists */
487 "vlan_mac_lock - locked reader - number %d", o->head_reader);
489 return ECORE_SUCCESS;
493 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
496 * @o: vlan_mac object
498 * @details May sleep. Claims and releases execution queue lock during its run.
500 static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
501 struct ecore_vlan_mac_obj *o)
505 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
506 rc = __ecore_vlan_mac_h_read_lock(sc, o);
507 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
513 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
516 * @o: vlan_mac object
518 * @details Should be called under execution queue lock. Notice if a pending
519 * execution exists, it would be performed if this was the last
520 * reader. possibly releasing and reclaiming the execution queue lock.
522 static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
523 struct ecore_vlan_mac_obj *o)
525 if (!o->head_reader) {
527 "Need to release vlan mac reader lock, but lock isn't taken");
528 #ifdef ECORE_STOP_ON_ERROR
533 PMD_DRV_LOG(INFO, sc,
534 "vlan_mac_lock - decreased readers to %d",
538 /* It's possible a new pending execution was added, and that this reader
539 * was last - if so we need to execute the command.
541 if (!o->head_reader && o->head_exe_request) {
542 PMD_DRV_LOG(INFO, sc,
543 "vlan_mac_lock - reader release encountered a pending request");
545 /* Writer release will do the trick */
546 __ecore_vlan_mac_h_write_unlock(sc, o);
551 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
554 * @o: vlan_mac object
556 * @details Notice if a pending execution exists, it would be performed if this
557 * was the last reader. Claims and releases the execution queue lock
560 void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
561 struct ecore_vlan_mac_obj *o)
563 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
564 __ecore_vlan_mac_h_read_unlock(sc, o);
565 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
569  * ecore_get_n_elements - copy up to @n registered elements into a buffer
572 * @o: vlan_mac object
573 * @n: number of elements to get
574 * @base: base address for element placement
575 * @stride: stride between elements (in bytes)
577 static int ecore_get_n_elements(struct bnx2x_softc *sc,
578 struct ecore_vlan_mac_obj *o, int n,
579 uint8_t * base, uint8_t stride, uint8_t size)
581 struct ecore_vlan_mac_registry_elem *pos;
582 uint8_t *next = base;
583 int counter = 0, read_lock;
585 ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
586 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
587 if (read_lock != ECORE_SUCCESS)
589 "get_n_elements failed to get vlan mac reader lock; Access without lock");
592 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
593 struct ecore_vlan_mac_registry_elem) {
595 ECORE_MEMCPY(next, &pos->u, size);
598 (sc, "copied element number %d to address %p element was:",
600 next += stride + size;
604 if (read_lock == ECORE_SUCCESS) {
605 ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
606 ecore_vlan_mac_h_read_unlock(sc, o);
609 return counter * ETH_ALEN;
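/* Illustrative sketch (editor's note, not part of the driver flow): a caller
 * dumping the currently registered MACs into a flat buffer could use the
 * get_n_elements() member set up for E2 objects below, assuming `buf` holds
 * at least n * ETH_ALEN bytes and the stride is 0:
 *
 *	copied = mac_obj->get_n_elements(sc, mac_obj, n, buf, 0, ETH_ALEN);
 *
 * The return value is the total number of bytes copied, i.e. a multiple of
 * ETH_ALEN, and the registry is protected by the reader lock taken above.
 */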
612 /* check_add() callbacks */
613 static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
614 struct ecore_vlan_mac_obj *o,
615 union ecore_classification_ramrod_data *data)
617 struct ecore_vlan_mac_registry_elem *pos;
619 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
620 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
621 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
623 if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
626 /* Check if a requested MAC already exists */
627 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
628 struct ecore_vlan_mac_registry_elem)
629 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
630 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
633 return ECORE_SUCCESS;
636 /* check_del() callbacks */
637 static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc
643 ecore_classification_ramrod_data
646 struct ecore_vlan_mac_registry_elem *pos;
648 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
649 data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
650 data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
652 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
653 struct ecore_vlan_mac_registry_elem)
654 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
655 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
661 /* check_move() callback */
662 static int ecore_check_move(struct bnx2x_softc *sc,
663 struct ecore_vlan_mac_obj *src_o,
664 struct ecore_vlan_mac_obj *dst_o,
665 union ecore_classification_ramrod_data *data)
667 struct ecore_vlan_mac_registry_elem *pos;
670 /* Check if we can delete the requested configuration from the first
673 pos = src_o->check_del(sc, src_o, data);
675 /* check if configuration can be added */
676 rc = dst_o->check_add(sc, dst_o, data);
678 /* If this classification can not be added (is already set)
679 * or can't be deleted - return an error.
687 static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
688 __rte_unused struct ecore_vlan_mac_obj
689 *src_o, __rte_unused struct ecore_vlan_mac_obj
690 *dst_o, __rte_unused union
691 ecore_classification_ramrod_data *data)
696 static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj
699 struct ecore_raw_obj *raw = &o->raw;
700 uint8_t rx_tx_flag = 0;
702 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
703 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
704 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
706 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
707 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
708 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
713 static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
714 int add, unsigned char *dev_addr, int index)
717 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
718 NIG_REG_LLH0_FUNC_MEM;
720 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
723 if (index > ECORE_LLH_CAM_MAX_PF_LINE)
726 ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
727 (add ? "ADD" : "DELETE"), index);
730 /* LLH_FUNC_MEM is a uint64_t WB register */
731 reg_offset += 8 * index;
733 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
734 (dev_addr[4] << 8) | dev_addr[5]);
735 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
737 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
740 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
741 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
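/* Worked example for ecore_set_mac_in_nig() above (editor's note): for the
 * MAC 00:11:22:33:44:55 the write-block data is packed as
 * wb_data[0] = 0x22334455 (MAC bytes 2..5) and wb_data[1] = 0x00000011
 * (MAC bytes 0..1), i.e. the low 32 bits of the 64-bit LLH_FUNC_MEM entry
 * carry the four least significant MAC bytes.
 */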
745 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
748 * @o: queue for which we want to configure this rule
749 * @add: if TRUE the command is an ADD command, DEL otherwise
750 * @opcode: CLASSIFY_RULE_OPCODE_XXX
751 * @hdr: pointer to a header to setup
754 static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
756 struct eth_classify_cmd_header
759 struct ecore_raw_obj *raw = &o->raw;
761 hdr->client_id = raw->cl_id;
762 hdr->func_id = raw->func_id;
764 /* Rx or/and Tx (internal switching) configuration ? */
765 hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);
768 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
770 hdr->cmd_general_data |=
771 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
775 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
777 * @cid: connection id
778 * @type: ECORE_FILTER_XXX_PENDING
779 * @hdr: pointer to header to setup
782  * Currently we always configure one rule and set the echo field to contain a CID and an opcode type.
785 static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header
788 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
789 (type << ECORE_SWCID_SHIFT));
790 hdr->rule_cnt = (uint8_t) rule_cnt;
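/* Editor's note: the echo field above packs the SW connection id in its low
 * ECORE_SWCID_SHIFT bits and the ECORE_FILTER_XXX_PENDING type above them,
 * so the completion handler can recover both the CID and the command type
 * from a single 32-bit word.
 */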
793 /* hw_config() callbacks */
794 static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
795 struct ecore_vlan_mac_obj *o,
796 struct ecore_exeq_elem *elem, int rule_idx,
797 __rte_unused int cam_offset)
799 struct ecore_raw_obj *raw = &o->raw;
800 struct eth_classify_rules_ramrod_data *data =
801 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
802 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
803 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
804 int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
805 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
806 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
808 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
809	 * relevant. In addition, the current implementation is tuned for a single ETH MAC.
812	 * When a PF configuration with multiple unicast ETH MACs in switch
813	 * independent mode is required (NetQ, multiple netdev MACs, etc.),
814	 * consider better utilisation of the 8 per-function MAC entries in
815	 * the LLH register. There are also the NIG_REG_P[01]_LLH_FUNC_MEM2
816	 * registers, which bring the total number of CAM entries to 16.
819 * Currently we won't configure NIG for MACs other than a primary ETH
820 * MAC and iSCSI L2 MAC.
822 * If this MAC is moving from one Queue to another, no need to change
825 if (cmd != ECORE_VLAN_MAC_MOVE) {
826 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
827 ecore_set_mac_in_nig(sc, add, mac,
828 ECORE_LLH_CAM_ISCSI_ETH_LINE);
829 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
830 ecore_set_mac_in_nig(sc, add, mac,
831 ECORE_LLH_CAM_ETH_LINE);
834 /* Reset the ramrod data buffer for the first rule */
836 ECORE_MEMSET(data, 0, sizeof(*data));
838 /* Setup a command header */
839 ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
840 &rule_entry->mac.header);
842 ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
843 (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
844 mac[4], mac[5], raw->cl_id);
846 /* Set a MAC itself */
847 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
848 &rule_entry->mac.mac_mid,
849 &rule_entry->mac.mac_lsb, mac);
850 rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
852 /* MOVE: Add a rule that will add this MAC to the target Queue */
853 if (cmd == ECORE_VLAN_MAC_MOVE) {
857 /* Setup ramrod data */
858 ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
859 vlan_mac.target_obj, TRUE,
860 CLASSIFY_RULE_OPCODE_MAC,
861 &rule_entry->mac.header);
863 /* Set a MAC itself */
864 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
865 &rule_entry->mac.mac_mid,
866 &rule_entry->mac.mac_lsb, mac);
867 rule_entry->mac.inner_mac =
868 elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
871 /* Set the ramrod data header */
872 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
877 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
882 * @cam_offset: offset in cam memory
883 * @hdr: pointer to a header to setup
887 static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj
888 *o, int type, int cam_offset, struct mac_configuration_hdr
891 struct ecore_raw_obj *r = &o->raw;
894 hdr->offset = (uint8_t) cam_offset;
895 hdr->client_id = ECORE_CPU_TO_LE16(0xff);
896 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
897 (type << ECORE_SWCID_SHIFT));
900 static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj
901 *o, int add, int opcode,
903 uint16_t vlan_id, struct
904 mac_configuration_entry
907 struct ecore_raw_obj *r = &o->raw;
908 uint32_t cl_bit_vec = (1 << r->cl_id);
910 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
911 cfg_entry->pf_id = r->func_id;
912 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
915 ECORE_SET_FLAG(cfg_entry->flags,
916 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
917 T_ETH_MAC_COMMAND_SET);
918 ECORE_SET_FLAG(cfg_entry->flags,
919 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
922 /* Set a MAC in a ramrod data */
923 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
924 &cfg_entry->middle_mac_addr,
925 &cfg_entry->lsb_mac_addr, mac);
927 ECORE_SET_FLAG(cfg_entry->flags,
928 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
929 T_ETH_MAC_COMMAND_INVALIDATE);
932 static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
934 struct ecore_vlan_mac_obj *o,
935 int type, int cam_offset,
936 int add, uint8_t * mac,
937 uint16_t vlan_id, int opcode,
938 struct mac_configuration_cmd
941 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
943 ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
944 ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
947 ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
948 (add ? "setting" : "clearing"),
949 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
950 o->raw.cl_id, cam_offset);
954 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
957 * @o: ecore_vlan_mac_obj
958 * @elem: ecore_exeq_elem
959 * @rule_idx: rule_idx
960 * @cam_offset: cam_offset
962 static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
963 struct ecore_vlan_mac_obj *o,
964 struct ecore_exeq_elem *elem,
965 __rte_unused int rule_idx, int cam_offset)
967 struct ecore_raw_obj *raw = &o->raw;
968 struct mac_configuration_cmd *config =
969 (struct mac_configuration_cmd *)(raw->rdata);
970 /* 57711 do not support MOVE command,
971 * so it's either ADD or DEL
973 int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
976 /* Reset the ramrod data buffer */
977 ECORE_MEMSET(config, 0, sizeof(*config));
979 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
981 elem->cmd_data.vlan_mac.u.mac.mac, 0,
982 ETH_VLAN_FILTER_ANY_VLAN, config);
986 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
989 * @p: command parameters
990 * @ppos: pointer to the cookie
992 * reconfigure next MAC/VLAN/VLAN-MAC element from the
993 * previously configured elements list.
995  * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is taken into account.
998  * pointer to the cookie - it should be given back in the next call to make the
999  * function handle the next element. If *ppos is set to NULL it will restart the
1000 * iterator. If the returned *ppos == NULL, the last element has been handled.
1004 static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
1005 struct ecore_vlan_mac_ramrod_params *p,
1006 struct ecore_vlan_mac_registry_elem **ppos)
1008 struct ecore_vlan_mac_registry_elem *pos;
1009 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1011 /* If list is empty - there is nothing to do here */
1012 if (ECORE_LIST_IS_EMPTY(&o->head)) {
1017 /* make a step... */
1019 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct
1020 ecore_vlan_mac_registry_elem,
1023 *ppos = ECORE_LIST_NEXT(*ppos, link,
1024 struct ecore_vlan_mac_registry_elem);
1028 /* If it's the last step - return NULL */
1029 if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1032 /* Prepare a 'user_req' */
1033 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1035 /* Set the command */
1036 p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1038 /* Set vlan_mac_flags */
1039 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1041 /* Set a restore bit */
1042 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1044 return ecore_config_vlan_mac(sc, p);
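/* Illustrative sketch (editor's note, not part of the driver flow): the
 * restore iterator above is driven by handing the cookie back on every call
 * until it comes back NULL. Assuming `p` points to prepared ramrod
 * parameters for this object:
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *
 *	do {
 *		rc = o->restore(sc, p, &pos);
 *		if (rc != ECORE_SUCCESS)
 *			break;
 *	} while (pos != NULL);
 */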
1047 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1048 * pointer to an element with a specific criteria and NULL if such an element
1049 * hasn't been found.
1051 static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
1052 struct ecore_exeq_elem *elem)
1054 struct ecore_exeq_elem *pos;
1055 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1057 /* Check pending for execution commands */
1058 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1059 struct ecore_exeq_elem)
1060 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1062 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1069 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1071 * @sc: device handle
1072 * @qo: ecore_qable_obj
1073 * @elem: ecore_exeq_elem
1075 * Checks that the requested configuration can be added. If yes and if
1076 * requested, consume CAM credit.
1078 * The 'validate' is run after the 'optimize'.
1081 static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
1082 union ecore_qable_obj *qo,
1083 struct ecore_exeq_elem *elem)
1085 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1086 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1089 /* Check the registry */
1090 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1093 "ADD command is not allowed considering current registry state.");
1097 /* Check if there is a pending ADD command for this
1098 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1100 if (exeq->get(exeq, elem)) {
1101 ECORE_MSG(sc, "There is a pending ADD command already");
1102 return ECORE_EXISTS;
1105 /* Consume the credit if not requested not to */
1106 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1107 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1111 return ECORE_SUCCESS;
1115 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1117 * @sc: device handle
1118 * @qo: queueable object to check
1119 * @elem: element that needs to be deleted
1121 * Checks that the requested configuration can be deleted. If yes and if
1122 * requested, returns a CAM credit.
1124 * The 'validate' is run after the 'optimize'.
1126 static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
1127 union ecore_qable_obj *qo,
1128 struct ecore_exeq_elem *elem)
1130 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1131 struct ecore_vlan_mac_registry_elem *pos;
1132 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1133 struct ecore_exeq_elem query_elem;
1135 /* If this classification can not be deleted (doesn't exist)
1136	 * - return ECORE_EXISTS.
1138 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1141 "DEL command is not allowed considering current registry state");
1142 return ECORE_EXISTS;
1145 /* Check if there are pending DEL or MOVE commands for this
1146 * MAC/VLAN/VLAN-MAC. Return an error if so.
1148 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1150 /* Check for MOVE commands */
1151 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1152 if (exeq->get(exeq, &query_elem)) {
1153 PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
1157 /* Check for DEL commands */
1158 if (exeq->get(exeq, elem)) {
1159 ECORE_MSG(sc, "There is a pending DEL command already");
1160 return ECORE_EXISTS;
1163 /* Return the credit to the credit pool if not requested not to */
1164 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1165 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1166 o->put_credit(o))) {
1167 PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
1171 return ECORE_SUCCESS;
1175 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1177 * @sc: device handle
1178 * @qo: queueable object to check (source)
1179 * @elem: element that needs to be moved
1181 * Checks that the requested configuration can be moved. If yes and if
1182 * requested, returns a CAM credit.
1184 * The 'validate' is run after the 'optimize'.
1186 static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
1187 union ecore_qable_obj *qo,
1188 struct ecore_exeq_elem *elem)
1190 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1191 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1192 struct ecore_exeq_elem query_elem;
1193 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1194 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1196 /* Check if we can perform this operation based on the current registry
1199 if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1201 "MOVE command is not allowed considering current registry state");
1205 /* Check if there is an already pending DEL or MOVE command for the
1206 * source object or ADD command for a destination object. Return an
1209 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1211 /* Check DEL on source */
1212 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1213 if (src_exeq->get(src_exeq, &query_elem)) {
1214 PMD_DRV_LOG(ERR, sc,
1215 "There is a pending DEL command on the source queue already");
1219 /* Check MOVE on source */
1220 if (src_exeq->get(src_exeq, elem)) {
1221 ECORE_MSG(sc, "There is a pending MOVE command already");
1222 return ECORE_EXISTS;
1225 /* Check ADD on destination */
1226 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1227 if (dest_exeq->get(dest_exeq, &query_elem)) {
1228 PMD_DRV_LOG(ERR, sc,
1229 "There is a pending ADD command on the destination queue already");
1233 /* Consume the credit if not requested not to */
1234 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1235 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1236 dest_o->get_credit(dest_o)))
1239 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1240 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1241 src_o->put_credit(src_o))) {
1242 /* return the credit taken from dest... */
1243 dest_o->put_credit(dest_o);
1247 return ECORE_SUCCESS;
1250 static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
1251 union ecore_qable_obj *qo,
1252 struct ecore_exeq_elem *elem)
1254 switch (elem->cmd_data.vlan_mac.cmd) {
1255 case ECORE_VLAN_MAC_ADD:
1256 return ecore_validate_vlan_mac_add(sc, qo, elem);
1257 case ECORE_VLAN_MAC_DEL:
1258 return ecore_validate_vlan_mac_del(sc, qo, elem);
1259 case ECORE_VLAN_MAC_MOVE:
1260 return ecore_validate_vlan_mac_move(sc, qo, elem);
1266 static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
1267 union ecore_qable_obj *qo,
1268 struct ecore_exeq_elem *elem)
1272 /* If consumption wasn't required, nothing to do */
1273 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1274 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1275 return ECORE_SUCCESS;
1277 switch (elem->cmd_data.vlan_mac.cmd) {
1278 case ECORE_VLAN_MAC_ADD:
1279 case ECORE_VLAN_MAC_MOVE:
1280 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1282 case ECORE_VLAN_MAC_DEL:
1283 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1292 return ECORE_SUCCESS;
1296 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1298 * @sc: device handle
1299 * @o: ecore_vlan_mac_obj
1302 static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
1303 struct ecore_vlan_mac_obj *o)
1306 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1307 struct ecore_raw_obj *raw = &o->raw;
1310 /* Wait for the current command to complete */
1311 rc = raw->wait_comp(sc, raw);
1315 /* Wait until there are no pending commands */
1316 if (!ecore_exe_queue_empty(exeq))
1317 ECORE_WAIT(sc, 1000);
1319 return ECORE_SUCCESS;
1322 return ECORE_TIMEOUT;
1325 static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
1326 struct ecore_vlan_mac_obj *o,
1327 unsigned long *ramrod_flags)
1329 int rc = ECORE_SUCCESS;
1331 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1333 ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
1334 rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1336 if (rc != ECORE_SUCCESS) {
1337 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1339		/** The calling function should not differentiate between this case
1340 * and the case in which there is already a pending ramrod
1344 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1346 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1352 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1354 * @sc: device handle
1355 * @o: ecore_vlan_mac_obj
1357 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1360 static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
1361 struct ecore_vlan_mac_obj *o,
1362 union event_ring_elem *cqe,
1363 unsigned long *ramrod_flags)
1365 struct ecore_raw_obj *r = &o->raw;
1368 /* Reset pending list */
1369 ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1372 r->clear_pending(r);
1374 /* If ramrod failed this is most likely a SW bug */
1375 if (cqe->message.error)
1378 /* Run the next bulk of pending commands if requested */
1379 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1380 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1385 /* If there is more work to do return PENDING */
1386 if (!ecore_exe_queue_empty(&o->exe_queue))
1387 return ECORE_PENDING;
1389 return ECORE_SUCCESS;
1393 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1395 * @sc: device handle
1396 * @qo: ecore_qable_obj
1397 * @elem: ecore_exeq_elem
1399 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1400 union ecore_qable_obj *qo,
1401 struct ecore_exeq_elem *elem)
1403 struct ecore_exeq_elem query, *pos;
1404 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1405 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1407 ECORE_MEMCPY(&query, elem, sizeof(query));
1409 switch (elem->cmd_data.vlan_mac.cmd) {
1410 case ECORE_VLAN_MAC_ADD:
1411 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1413 case ECORE_VLAN_MAC_DEL:
1414 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1417 /* Don't handle anything other than ADD or DEL */
1421 /* If we found the appropriate element - delete it */
1422 pos = exeq->get(exeq, &query);
1425 /* Return the credit of the optimized command */
1426 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1427 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1428 if ((query.cmd_data.vlan_mac.cmd ==
1429 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1430 PMD_DRV_LOG(ERR, sc,
1431 "Failed to return the credit for the optimized ADD command");
1433 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1434 PMD_DRV_LOG(ERR, sc,
1435 "Failed to recover the credit from the optimized DEL command");
1440 ECORE_MSG(sc, "Optimizing %s command",
1441 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1444 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1445 ecore_exe_queue_free_elem(sc, pos);
1453 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1455 * @sc: device handle
1461 * prepare a registry element according to the current command request.
1463 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1464 struct ecore_vlan_mac_obj *o,
1465 struct ecore_exeq_elem *elem,
1467 ecore_vlan_mac_registry_elem
1470 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1471 struct ecore_vlan_mac_registry_elem *reg_elem;
1473 /* Allocate a new registry element if needed. */
1475 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1476 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1480 /* Get a new CAM offset */
1481 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1482 /* This shall never happen, because we have checked the
1483 * CAM availability in the 'validate'.
1485 ECORE_DBG_BREAK_IF(1);
1486 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1490 ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);
1492 /* Set a VLAN-MAC data */
1493 ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u,
1494 sizeof(reg_elem->u));
1496 /* Copy the flags (needed for DEL and RESTORE flows) */
1497 reg_elem->vlan_mac_flags =
1498 elem->cmd_data.vlan_mac.vlan_mac_flags;
1499 } else /* DEL, RESTORE */
1500 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1503 return ECORE_SUCCESS;
1507 * ecore_execute_vlan_mac - execute vlan mac command
1509 * @sc: device handle
1514 * go and send a ramrod!
1516 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1517 union ecore_qable_obj *qo,
1518 ecore_list_t * exe_chunk,
1519 unsigned long *ramrod_flags)
1521 struct ecore_exeq_elem *elem;
1522 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1523 struct ecore_raw_obj *r = &o->raw;
1525 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1526 int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1527 struct ecore_vlan_mac_registry_elem *reg_elem;
1528 enum ecore_vlan_mac_cmd cmd;
1530 /* If DRIVER_ONLY execution is requested, cleanup a registry
1531 * and exit. Otherwise send a ramrod to FW.
1538 /* Fill the ramrod data */
1539 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1540 struct ecore_exeq_elem) {
1541 cmd = elem->cmd_data.vlan_mac.cmd;
1542 /* We will add to the target object in MOVE command, so
1543 * change the object for a CAM search.
1545 if (cmd == ECORE_VLAN_MAC_MOVE)
1546 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1550 rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1556 ECORE_DBG_BREAK_IF(!reg_elem);
1558 /* Push a new entry into the registry */
1560 ((cmd == ECORE_VLAN_MAC_ADD) ||
1561 (cmd == ECORE_VLAN_MAC_MOVE)))
1562 ECORE_LIST_PUSH_HEAD(®_elem->link,
1565 /* Configure a single command in a ramrod data buffer */
1566 o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1568 /* MOVE command consumes 2 entries in the ramrod data */
1569 if (cmd == ECORE_VLAN_MAC_MOVE)
1576		 * No need for an explicit memory barrier here: the ordering of
1577		 * writing to the SPQ element and updating the SPQ producer
1578		 * (which involves a memory read) is guaranteed by the full
1579		 * memory barrier inside ecore_sp_post().
1583 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1584 r->rdata_mapping, ETH_CONNECTION_TYPE);
1589 /* Now, when we are done with the ramrod - clean up the registry */
1590 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1591 cmd = elem->cmd_data.vlan_mac.cmd;
1592 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1593 reg_elem = o->check_del(sc, o,
1594 &elem->cmd_data.vlan_mac.u);
1596 ECORE_DBG_BREAK_IF(!reg_elem);
1598 o->put_cam_offset(o, reg_elem->cam_offset);
1599 ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head);
1600 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1605 return ECORE_PENDING;
1607 return ECORE_SUCCESS;
1610 r->clear_pending(r);
1612 /* Cleanup a registry in case of a failure */
1613 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1614 cmd = elem->cmd_data.vlan_mac.cmd;
1616 if (cmd == ECORE_VLAN_MAC_MOVE)
1617 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1621 /* Delete all newly added above entries */
1623 ((cmd == ECORE_VLAN_MAC_ADD) ||
1624 (cmd == ECORE_VLAN_MAC_MOVE))) {
1625 reg_elem = o->check_del(sc, cam_obj,
1626 &elem->cmd_data.vlan_mac.u);
1628 ECORE_LIST_REMOVE_ENTRY(®_elem->link,
1630 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1638 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1639 ecore_vlan_mac_ramrod_params *p)
1641 struct ecore_exeq_elem *elem;
1642 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1643 int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1645 /* Allocate the execution queue element */
1646 elem = ecore_exe_queue_alloc_elem(sc);
1650 /* Set the command 'length' */
1651 switch (p->user_req.cmd) {
1652 case ECORE_VLAN_MAC_MOVE:
1659 /* Fill the object specific info */
1660 ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1661 sizeof(p->user_req));
1663 /* Try to add a new command to the pending list */
1664 return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1668 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1670 * @sc: device handle
1674 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1675 struct ecore_vlan_mac_ramrod_params *p)
1677 int rc = ECORE_SUCCESS;
1678 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1679 unsigned long *ramrod_flags = &p->ramrod_flags;
1680 int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1681 struct ecore_raw_obj *raw = &o->raw;
1684 * Add new elements to the execution list for commands that require it.
1687 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1692 /* If nothing will be executed further in this iteration we want to
1693 * return PENDING if there are pending commands
1695 if (!ecore_exe_queue_empty(&o->exe_queue))
1698 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1700 "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1701 raw->clear_pending(raw);
1704 /* Execute commands if required */
1705 if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1706 ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1707 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1713 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1714	 * then the user wants to wait until the last command is done.
1716 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1717		/* Wait at most the current exe_queue length in iterations, plus
1718 * one (for the current pending command).
1720 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1722 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1725 /* Wait for the current command to complete */
1726 rc = raw->wait_comp(sc, raw);
1730 /* Make a next step */
1731 rc = __ecore_vlan_mac_execute_step(sc,
1738 return ECORE_SUCCESS;
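/* Illustrative sketch (editor's note, not part of the driver flow): adding a
 * single unicast MAC and waiting for the FW completion would typically look
 * like the following, assuming `ramrod_param` is a zeroed
 * ecore_vlan_mac_ramrod_params and `mac_obj` was set up by
 * ecore_init_mac_obj():
 *
 *	ramrod_param.vlan_mac_obj = mac_obj;
 *	ramrod_param.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
 *	ECORE_SET_BIT(ECORE_ETH_MAC, &ramrod_param.user_req.vlan_mac_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &ramrod_param.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &ramrod_param);
 */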
1745 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1747 * @sc: device handle
1750 * @ramrod_flags: execution flags to be used for this deletion
1752 * if the last operation has completed successfully and there are no
1753 * more elements left, positive value if the last operation has completed
1754 * successfully and there are more previously configured elements, negative
1755 * value if the current operation has failed.
1757 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1758 struct ecore_vlan_mac_obj *o,
1759 unsigned long *vlan_mac_flags,
1760 unsigned long *ramrod_flags)
1762 struct ecore_vlan_mac_registry_elem *pos = NULL;
1763 int rc = 0, read_lock;
1764 struct ecore_vlan_mac_ramrod_params p;
1765 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1766 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1768 /* Clear pending commands first */
1770 ECORE_SPIN_LOCK_BH(&exeq->lock);
1772 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1773 &exeq->exe_queue, link,
1774 struct ecore_exeq_elem) {
1775 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1777 rc = exeq->remove(sc, exeq->owner, exeq_pos);
1779 PMD_DRV_LOG(ERR, sc, "Failed to remove command");
1780 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1783 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1785 ecore_exe_queue_free_elem(sc, exeq_pos);
1789 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1791 /* Prepare a command request */
1792 ECORE_MEMSET(&p, 0, sizeof(p));
1794 p.ramrod_flags = *ramrod_flags;
1795 p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1797 /* Add all but the last VLAN-MAC to the execution queue without actually
1798	 * executing anything.
1800 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1801 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1802 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1804 ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1805 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1806 if (read_lock != ECORE_SUCCESS)
1809 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1810 struct ecore_vlan_mac_registry_elem) {
1811 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1812 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1813 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1814 rc = ecore_config_vlan_mac(sc, &p);
1816 PMD_DRV_LOG(ERR, sc,
1817 "Failed to add a new DEL command");
1818 ecore_vlan_mac_h_read_unlock(sc, o);
1824 ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1825 ecore_vlan_mac_h_read_unlock(sc, o);
1827 p.ramrod_flags = *ramrod_flags;
1828 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1830 return ecore_config_vlan_mac(sc, &p);
1833 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1834 uint32_t cid, uint8_t func_id,
1836 ecore_dma_addr_t rdata_mapping, int state,
1837 unsigned long *pstate, ecore_obj_type type)
1839 raw->func_id = func_id;
1843 raw->rdata_mapping = rdata_mapping;
1845 raw->pstate = pstate;
1846 raw->obj_type = type;
1847 raw->check_pending = ecore_raw_check_pending;
1848 raw->clear_pending = ecore_raw_clear_pending;
1849 raw->set_pending = ecore_raw_set_pending;
1850 raw->wait_comp = ecore_raw_wait;
1853 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1854 uint8_t cl_id, uint32_t cid,
1855 uint8_t func_id, void *rdata,
1856 ecore_dma_addr_t rdata_mapping,
1857 int state, unsigned long *pstate,
1858 ecore_obj_type type,
1859 struct ecore_credit_pool_obj
1860 *macs_pool, struct ecore_credit_pool_obj
1863 ECORE_LIST_INIT(&o->head);
1865 o->head_exe_request = FALSE;
1866 o->saved_ramrod_flags = 0;
1868 o->macs_pool = macs_pool;
1869 o->vlans_pool = vlans_pool;
1871 o->delete_all = ecore_vlan_mac_del_all;
1872 o->restore = ecore_vlan_mac_restore;
1873 o->complete = ecore_complete_vlan_mac;
1874 o->wait = ecore_wait_vlan_mac;
1876 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1877 state, pstate, type);
1880 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1881 struct ecore_vlan_mac_obj *mac_obj,
1882 uint8_t cl_id, uint32_t cid, uint8_t func_id,
1883 void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1884 unsigned long *pstate, ecore_obj_type type,
1885 struct ecore_credit_pool_obj *macs_pool)
1887 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1889 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1890 rdata_mapping, state, pstate, type,
1893 /* CAM credit pool handling */
1894 mac_obj->get_credit = ecore_get_credit_mac;
1895 mac_obj->put_credit = ecore_put_credit_mac;
1896 mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1897 mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1899 if (CHIP_IS_E1x(sc)) {
1900 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1901 mac_obj->check_del = ecore_check_mac_del;
1902 mac_obj->check_add = ecore_check_mac_add;
1903 mac_obj->check_move = ecore_check_move_always_err;
1904 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1907 ecore_exe_queue_init(sc,
1908 &mac_obj->exe_queue, 1, qable_obj,
1909 ecore_validate_vlan_mac,
1910 ecore_remove_vlan_mac,
1911 ecore_optimize_vlan_mac,
1912 ecore_execute_vlan_mac,
1913 ecore_exeq_get_mac);
1915 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1916 mac_obj->check_del = ecore_check_mac_del;
1917 mac_obj->check_add = ecore_check_mac_add;
1918 mac_obj->check_move = ecore_check_move;
1919 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1920 mac_obj->get_n_elements = ecore_get_n_elements;
1923 ecore_exe_queue_init(sc,
1924 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1925 qable_obj, ecore_validate_vlan_mac,
1926 ecore_remove_vlan_mac,
1927 ecore_optimize_vlan_mac,
1928 ecore_execute_vlan_mac,
1929 ecore_exeq_get_mac);
1933 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1934 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1935 tstorm_eth_mac_filter_config
1936 *mac_filters, uint16_t pf_id)
1938 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1940 uint32_t addr = BAR_TSTRORM_INTMEM +
1941 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1943 ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1946 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1947 struct ecore_rx_mode_ramrod_params *p)
1949 /* update the sc MAC filter structure */
1950 uint32_t mask = (1 << p->cl_id);
1952 struct tstorm_eth_mac_filter_config *mac_filters =
1953 (struct tstorm_eth_mac_filter_config *)p->rdata;
1955 /* initial setting is drop-all */
1956 uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1957 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1958 uint8_t unmatched_unicast = 0;
1960	/* In e1x we only take the rx accept flags into account, since tx switching is not enabled. */
1962 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1963 /* accept matched ucast */
1966 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1967 /* accept matched mcast */
1970 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1971 /* accept all mcast */
1975 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1976 /* accept all mcast */
1980 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1981 /* accept (all) bcast */
1983 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1984 /* accept unmatched unicasts */
1985 unmatched_unicast = 1;
1987 mac_filters->ucast_drop_all = drop_all_ucast ?
1988 mac_filters->ucast_drop_all | mask :
1989 mac_filters->ucast_drop_all & ~mask;
1991 mac_filters->mcast_drop_all = drop_all_mcast ?
1992 mac_filters->mcast_drop_all | mask :
1993 mac_filters->mcast_drop_all & ~mask;
1995 mac_filters->ucast_accept_all = accp_all_ucast ?
1996 mac_filters->ucast_accept_all | mask :
1997 mac_filters->ucast_accept_all & ~mask;
1999 mac_filters->mcast_accept_all = accp_all_mcast ?
2000 mac_filters->mcast_accept_all | mask :
2001 mac_filters->mcast_accept_all & ~mask;
2003 mac_filters->bcast_accept_all = accp_all_bcast ?
2004 mac_filters->bcast_accept_all | mask :
2005 mac_filters->bcast_accept_all & ~mask;
2007 mac_filters->unmatched_unicast = unmatched_unicast ?
2008 mac_filters->unmatched_unicast | mask :
2009 mac_filters->unmatched_unicast & ~mask;
2011	ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x"
2012		  " accp_mcast 0x%x accp_bcast 0x%x",
2013 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2014 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2015 mac_filters->bcast_accept_all);
2017 /* write the MAC filter structure */
2018 __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2020 /* The operation is completed */
2021 ECORE_CLEAR_BIT(p->state, p->pstate);
2022 ECORE_SMP_MB_AFTER_CLEAR_BIT();
2024 return ECORE_SUCCESS;
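/* Worked example for ecore_set_rx_mode_e1x() above (editor's note): for
 * cl_id = 3 the mask is 1 << 3 = 0x08. Requesting ECORE_ACCEPT_UNICAST
 * clears drop_all_ucast, so bit 3 of mac_filters->ucast_drop_all is cleared
 * while the bits that belong to other clients are left untouched; every
 * other filter word is updated the same way.
 */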
2027 /* Setup ramrod data */
2028 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2029 *hdr, uint8_t rule_cnt)
2031 hdr->echo = ECORE_CPU_TO_LE32(cid);
2032 hdr->rule_cnt = rule_cnt;
2035 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
2036 *cmd, int clear_accept_all)
2040 /* start with 'drop-all' */
2041 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2042 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2044 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2045 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2047 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2048 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2050 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2051 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2052 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2055 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2056 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2057 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2059 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2060 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2062 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2063 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2064 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2066 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2067 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2069 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2070 if (clear_accept_all) {
2071 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2072 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2073 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2074 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2077 cmd->state = ECORE_CPU_TO_LE16(state);
2080 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2081 struct ecore_rx_mode_ramrod_params *p)
2083 struct eth_filter_rules_ramrod_data *data = p->rdata;
2085 uint8_t rule_idx = 0;
2087 /* Reset the ramrod data buffer */
2088 ECORE_MEMSET(data, 0, sizeof(*data));
2090 /* Setup ramrod data */
2092 /* Tx (internal switching) */
2093 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2094 data->rules[rule_idx].client_id = p->cl_id;
2095 data->rules[rule_idx].func_id = p->func_id;
2097 data->rules[rule_idx].cmd_general_data =
2098 ETH_FILTER_RULES_CMD_TX_CMD;
2100 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2101 &(data->rules[rule_idx++]),
2106 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2107 data->rules[rule_idx].client_id = p->cl_id;
2108 data->rules[rule_idx].func_id = p->func_id;
2110 data->rules[rule_idx].cmd_general_data =
2111 ETH_FILTER_RULES_CMD_RX_CMD;
2113 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2114 &(data->rules[rule_idx++]),
2118 /* If FCoE Queue configuration has been requested configure the Rx and
2119 * internal switching modes for this queue in separate rules.
2121	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2122 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2124 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2125 /* Tx (internal switching) */
2126 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2127 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2128 data->rules[rule_idx].func_id = p->func_id;
2130 data->rules[rule_idx].cmd_general_data =
2131 ETH_FILTER_RULES_CMD_TX_CMD;
2133 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2135 [rule_idx++]), TRUE);
2139 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2140 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2141 data->rules[rule_idx].func_id = p->func_id;
2143 data->rules[rule_idx].cmd_general_data =
2144 ETH_FILTER_RULES_CMD_RX_CMD;
2146 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2148 [rule_idx++]), TRUE);
2152 /* Set the ramrod header (most importantly - number of rules to
2155 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2158 (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2159 data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2161 /* No explicit memory barrier is needed here: the ordering of the
2162 * write to the SPQ element and the update of the SPQ producer
2163 * (which involves a memory read) is guaranteed by the full
2164 * memory barrier inside ecore_sp_post().
2165 */
2169 rc = ecore_sp_post(sc,
2170 RAMROD_CMD_ID_ETH_FILTER_RULES,
2171 p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2175 /* Ramrod completion is pending */
2176 return ECORE_PENDING;
2179 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2180 struct ecore_rx_mode_ramrod_params *p)
2182 return ecore_state_wait(sc, p->state, p->pstate);
2185 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2187 __rte_unused struct ecore_rx_mode_ramrod_params *p)
2190 return ECORE_SUCCESS;
2193 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2194 struct ecore_rx_mode_ramrod_params *p)
2198 /* Configure the new classification in the chip */
2199 if (p->rx_mode_obj->config_rx_mode) {
2200 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2204 /* Wait for a ramrod completion if one was requested */
2205 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2206 rc = p->rx_mode_obj->wait_comp(sc, p);
2211 ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
2218 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2220 if (CHIP_IS_E1x(sc)) {
2221 o->wait_comp = ecore_empty_rx_mode_wait;
2222 o->config_rx_mode = ecore_set_rx_mode_e1x;
2224 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2225 o->config_rx_mode = ecore_set_rx_mode_e2;
2229 /********************* Multicast verbs: SET, CLEAR ****************************/
2230 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2232 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
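/* The approximate-match registry is a 256-bin hash: the top 8 bits of the
 * little-endian CRC32 of the MAC address select the bin, so many multicast
 * MACs may share one bin and matching is "approximate" by design.
 */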
2235 struct ecore_mcast_mac_elem {
2236 ecore_list_entry_t link;
2237 uint8_t mac[ETH_ALEN];
2238 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2241 struct ecore_pending_mcast_cmd {
2242 ecore_list_entry_t link;
2243 int type; /* ECORE_MCAST_CMD_X */
2245 ecore_list_t macs_head;
2246 uint32_t macs_num; /* Needed for DEL command */
2247 int next_bin; /* Needed for RESTORE flow with aprox match */
2250 int done; /* set to TRUE when the command has been handled;
2251 * practically used in 57712 handling only, where one pending
2252 * command may be handled in a few operations. Since for
2253 * other chips every operation is completed in a
2254 * single ramrod, there is no need to utilize this field. */
2258 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2260 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2261 o->raw.wait_comp(sc, &o->raw))
2262 return ECORE_TIMEOUT;
2264 return ECORE_SUCCESS;
2267 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2268 struct ecore_mcast_obj *o,
2269 struct ecore_mcast_ramrod_params *p,
2270 enum ecore_mcast_cmd cmd)
2273 struct ecore_pending_mcast_cmd *new_cmd;
2274 struct ecore_mcast_mac_elem *cur_mac = NULL;
2275 struct ecore_mcast_list_elem *pos;
2276 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2277 p->mcast_list_len : 0);
2279 /* If the command is empty ("handle pending commands only"), break */
2280 if (!p->mcast_list_len)
2281 return ECORE_SUCCESS;
2283 total_sz = sizeof(*new_cmd) +
2284 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
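/* The command header and its MAC array are carved out of one allocation:
 * for ADD an array of macs_list_len ecore_mcast_mac_elem entries lives
 * directly behind the ecore_pending_mcast_cmd header (see the cur_mac
 * pointer arithmetic below); DEL and RESTORE need no MAC storage at all.
 */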
2286 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2287 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2289 if (!new_cmd)
2290 return ECORE_NOMEM;
2292 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2293 cmd, macs_list_len);
2295 ECORE_LIST_INIT(&new_cmd->data.macs_head);
2297 new_cmd->type = cmd;
2298 new_cmd->done = FALSE;
2301 case ECORE_MCAST_CMD_ADD:
2302 cur_mac = (struct ecore_mcast_mac_elem *)
2303 ((uint8_t *) new_cmd + sizeof(*new_cmd));
2305 /* Push the MACs of the current command into the pending command MACs list: FIFO */
2308 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2309 struct ecore_mcast_list_elem) {
2310 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2311 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2312 &new_cmd->data.macs_head);
2318 case ECORE_MCAST_CMD_DEL:
2319 new_cmd->data.macs_num = p->mcast_list_len;
2322 case ECORE_MCAST_CMD_RESTORE:
2323 new_cmd->data.next_bin = 0;
2327 ECORE_FREE(sc, new_cmd, total_sz);
2328 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2332 /* Push the new pending command to the tail of the pending list: FIFO */
2333 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2337 return ECORE_PENDING;
2341 * ecore_mcast_get_next_bin - get the next set bin (index)
2344 * @last: index to start looking from (inclusive)
2346 * returns the next found (set) bin or a negative value if none is found.
2348 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2350 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2352 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2353 if (o->registry.aprox_match.vec[i])
2354 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2355 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2356 if (BIT_VEC64_TEST_BIT
2357 (o->registry.aprox_match.vec, cur_bit)) {
2369 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2373 * returns the index of the found bin or -1 if none is found
2375 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2377 int cur_bit = ecore_mcast_get_next_bin(o, 0);
2380 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2385 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2387 struct ecore_raw_obj *raw = &o->raw;
2388 uint8_t rx_tx_flag = 0;
2390 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2391 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2392 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2394 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2395 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2396 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2401 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2402 struct ecore_mcast_obj *o, int idx,
2403 union ecore_mcast_config_data *cfg_data,
2404 enum ecore_mcast_cmd cmd)
2406 struct ecore_raw_obj *r = &o->raw;
2407 struct eth_multicast_rules_ramrod_data *data =
2408 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2409 uint8_t func_id = r->func_id;
2410 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2413 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2414 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2416 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2418 /* Get a bin and update a bins' vector */
2420 case ECORE_MCAST_CMD_ADD:
2421 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2422 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2425 case ECORE_MCAST_CMD_DEL:
2426 /* If there were no more bins to clear
2427 * (ecore_mcast_clear_first_bin() returns -1) then we would
2428 * clear any (0xff) bin.
2429 * See ecore_mcast_validate_e2() for an explanation of when this may happen. */
2432 bin = ecore_mcast_clear_first_bin(o);
2435 case ECORE_MCAST_CMD_RESTORE:
2436 bin = cfg_data->bin;
2440 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2444 ECORE_MSG(sc, "%s bin %d",
2445 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2446 "Setting" : "Clearing"), bin);
2448 data->rules[idx].bin_id = (uint8_t) bin;
2449 data->rules[idx].func_id = func_id;
2450 data->rules[idx].engine_id = o->engine_id;
2454 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2456 * @sc: device handle
2458 * @start_bin: index in the registry to start from (inclusive)
2459 * @rdata_idx: index in the ramrod data to start from
2461 * returns last handled bin index or -1 if all bins have been handled
2463 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2464 struct ecore_mcast_obj *o,
2465 int start_bin, int *rdata_idx)
2467 int cur_bin, cnt = *rdata_idx;
2468 union ecore_mcast_config_data cfg_data = { NULL };
2470 /* go through the registry and configure the bins from it */
2471 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2472 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2474 cfg_data.bin = (uint8_t) cur_bin;
2475 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2479 ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
2481 /* Break if we reached the maximum number of rules. */
2484 if (cnt >= o->max_cmd_len)
2493 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2494 struct ecore_mcast_obj *o,
2495 struct ecore_pending_mcast_cmd
2496 *cmd_pos, int *line_idx)
2498 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2499 int cnt = *line_idx;
2500 union ecore_mcast_config_data cfg_data = { NULL };
2502 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2503 &cmd_pos->data.macs_head, link,
2504 struct ecore_mcast_mac_elem) {
2506 cfg_data.mac = &pmac_pos->mac[0];
2507 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2512 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2513 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2514 pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2516 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2517 &cmd_pos->data.macs_head);
2519 /* Break if we reached the maximum number of rules. */
2522 if (cnt >= o->max_cmd_len)
2528 /* if no more MACs to configure - we are done */
2529 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2530 cmd_pos->done = TRUE;
2533 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2534 struct ecore_mcast_obj *o,
2535 struct ecore_pending_mcast_cmd
2536 *cmd_pos, int *line_idx)
2538 int cnt = *line_idx;
2540 while (cmd_pos->data.macs_num) {
2541 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2545 cmd_pos->data.macs_num--;
2547 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d",
2548 cmd_pos->data.macs_num, cnt);
2550 /* Break if we reached the maximum number of rules. */
2553 if (cnt >= o->max_cmd_len)
2559 /* If we cleared all bins - we are done */
2560 if (!cmd_pos->data.macs_num)
2561 cmd_pos->done = TRUE;
2564 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2565 struct ecore_mcast_obj *o, struct
2566 ecore_pending_mcast_cmd
2567 *cmd_pos, int *line_idx)
2569 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2572 if (cmd_pos->data.next_bin < 0)
2573 /* If o->set_restore returned -1 we are done */
2574 cmd_pos->done = TRUE;
2576 /* Start from the next bin next time */
2577 cmd_pos->data.next_bin++;
2580 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2581 ecore_mcast_ramrod_params
2584 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2586 struct ecore_mcast_obj *o = p->mcast_obj;
2588 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2589 &o->pending_cmds_head, link,
2590 struct ecore_pending_mcast_cmd) {
2591 switch (cmd_pos->type) {
2592 case ECORE_MCAST_CMD_ADD:
2593 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2596 case ECORE_MCAST_CMD_DEL:
2597 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2600 case ECORE_MCAST_CMD_RESTORE:
2601 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2606 PMD_DRV_LOG(ERR, sc,
2607 "Unknown command: %d", cmd_pos->type);
2611 /* If the command has been completed - remove it from the list
2612 * and free the memory
2614 if (cmd_pos->done) {
2615 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2616 &o->pending_cmds_head);
2617 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2620 /* Break if we reached the maximum number of rules */
2621 if (cnt >= o->max_cmd_len)
2628 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2629 struct ecore_mcast_obj *o,
2630 struct ecore_mcast_ramrod_params *p,
2633 struct ecore_mcast_list_elem *mlist_pos;
2634 union ecore_mcast_config_data cfg_data = { NULL };
2635 int cnt = *line_idx;
2637 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2638 struct ecore_mcast_list_elem) {
2639 cfg_data.mac = mlist_pos->mac;
2640 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2645 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2646 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2647 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2653 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2654 struct ecore_mcast_obj *o,
2655 struct ecore_mcast_ramrod_params *p,
2658 int cnt = *line_idx, i;
2660 for (i = 0; i < p->mcast_list_len; i++) {
2661 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2666 "Deleting MAC. %d left", p->mcast_list_len - i - 1);
2673 * ecore_mcast_handle_current_cmd -
2675 * @sc: device handle
2678 * @start_cnt: first line in the ramrod data that may be used
2680 * This function is called if there is enough room for the current command in the ramrod data.
2682 * Returns number of lines filled in the ramrod data in total.
2684 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2685 ecore_mcast_ramrod_params *p,
2686 enum ecore_mcast_cmd cmd,
2689 struct ecore_mcast_obj *o = p->mcast_obj;
2690 int cnt = start_cnt;
2692 ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
2695 case ECORE_MCAST_CMD_ADD:
2696 ecore_mcast_hdl_add(sc, o, p, &cnt);
2699 case ECORE_MCAST_CMD_DEL:
2700 ecore_mcast_hdl_del(sc, o, p, &cnt);
2703 case ECORE_MCAST_CMD_RESTORE:
2704 o->hdl_restore(sc, o, 0, &cnt);
2708 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2712 /* The current command has been handled */
2713 p->mcast_list_len = 0;
2718 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2719 struct ecore_mcast_ramrod_params *p,
2720 enum ecore_mcast_cmd cmd)
2722 struct ecore_mcast_obj *o = p->mcast_obj;
2723 int reg_sz = o->get_registry_size(o);
2726 /* DEL command deletes all currently configured MACs */
2727 case ECORE_MCAST_CMD_DEL:
2728 o->set_registry_size(o, 0);
2731 /* RESTORE command will restore the entire multicast configuration */
2732 case ECORE_MCAST_CMD_RESTORE:
2733 /* Here we set the approximate amount of work to do; in
2734 * fact it may turn out to be less, since some MACs in postponed ADD
2735 * command(s) scheduled before this command may fall into
2736 * the same bin, so the actual number of bins set in the
2737 * registry would be less than we estimated here. See
2738 * ecore_mcast_set_one_rule_e2() for further details.
2739 */
2740 p->mcast_list_len = reg_sz;
2743 case ECORE_MCAST_CMD_ADD:
2744 case ECORE_MCAST_CMD_CONT:
2745 /* Here we assume that all new MACs will fall into new bins.
2746 * However we will correct the real registry size after we
2747 * handle all pending commands.
2749 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2753 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2757 /* Increase the total number of MACs pending to be configured */
2758 o->total_pending_num += p->mcast_list_len;
2760 return ECORE_SUCCESS;
2763 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2764 struct ecore_mcast_ramrod_params *p,
2767 struct ecore_mcast_obj *o = p->mcast_obj;
2769 o->set_registry_size(o, old_num_bins);
2770 o->total_pending_num -= p->mcast_list_len;
2774 * ecore_mcast_set_rdata_hdr_e2 - sets a header values
2776 * @sc: device handle
2778 * @len: number of rules to handle
2780 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
2781 *sc, struct ecore_mcast_ramrod_params
2784 struct ecore_raw_obj *r = &p->mcast_obj->raw;
2785 struct eth_multicast_rules_ramrod_data *data =
2786 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2788 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2789 (ECORE_FILTER_MCAST_PENDING <<
2790 ECORE_SWCID_SHIFT));
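/* The echo field packs the SW connection id in the low ECORE_SWCID_MASK
 * bits and the ECORE_FILTER_MCAST_PENDING marker above ECORE_SWCID_SHIFT,
 * so the completion path can match the ramrod completion back to this
 * pending multicast operation.
 */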
2791 data->header.rule_cnt = len;
2795 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2797 * @sc: device handle
2800 * Recalculate the actual number of set bins in the registry using Brian
2801 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins. */
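/* Illustration: the classic Kernighan step is "elem &= elem - 1", which
 * clears the lowest set bit on every iteration. For elem = 0x58 (0b1011000)
 * the sequence is 0b1010000 -> 0b1000000 -> 0, i.e. exactly three
 * iterations for three set bins, so the cost scales with the number of set
 * bins rather than with the width of the registry vector.
 */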
2803 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2808 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2809 elem = o->registry.aprox_match.vec[i];
2814 o->set_registry_size(o, cnt);
2816 return ECORE_SUCCESS;
2819 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2820 struct ecore_mcast_ramrod_params *p,
2821 enum ecore_mcast_cmd cmd)
2823 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2824 struct ecore_mcast_obj *o = p->mcast_obj;
2825 struct eth_multicast_rules_ramrod_data *data =
2826 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2829 /* Reset the ramrod data buffer */
2830 ECORE_MEMSET(data, 0, sizeof(*data));
2832 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2834 /* If there are no more pending commands - clear SCHEDULED state */
2835 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2838 /* The below may be TRUE if there was enough room in ramrod
2839 * data for all pending commands and for the current
2840 * command. Otherwise the current command would have been added
2841 * to the pending commands and p->mcast_list_len would have been zeroed. */
2844 if (p->mcast_list_len > 0)
2845 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2847 /* We've pulled out some MACs - update the total number of outstanding MACs. */
2850 o->total_pending_num -= cnt;
2853 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2854 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2856 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2858 /* Update a registry size if there are no more pending operations.
2860 * We don't want to change the value of the registry size if there are
2861 * pending operations because we want it to always be equal to the
2862 * exact or the approximate number (see ecore_mcast_validate_e2()) of
2863 * set bins after the last requested operation in order to properly
2864 * evaluate the size of the next DEL/RESTORE operation.
2866 * Note that we update the registry itself during command(s) handling
2867 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2868 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2869 * with a limited amount of update commands (per MAC/bin) and we don't
2870 * know in this scope what the actual state of bins configuration is
2871 * going to be after this ramrod.
2873 if (!o->total_pending_num)
2874 ecore_mcast_refresh_registry_e2(o);
2876 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2877 * RAMROD_PENDING status immediately.
2879 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2880 raw->clear_pending(raw);
2881 return ECORE_SUCCESS;
2883 /* No explicit memory barrier is needed here: the ordering of the
2884 * write to the SPQ element and the update of the SPQ producer
2885 * (which involves a memory read) is guaranteed by the full
2886 * memory barrier inside ecore_sp_post().
2887 */
2891 rc = ecore_sp_post(sc,
2892 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2894 raw->rdata_mapping, ETH_CONNECTION_TYPE);
2898 /* Ramrod completion is pending */
2899 return ECORE_PENDING;
2903 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2904 struct ecore_mcast_ramrod_params *p,
2905 enum ecore_mcast_cmd cmd)
2907 /* Mark that there is work to do */
2908 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2909 p->mcast_list_len = 1;
2911 return ECORE_SUCCESS;
2914 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2915 __rte_unused struct ecore_mcast_ramrod_params
2916 *p, __rte_unused int old_num_bins)
2921 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2923 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
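/* The 256 approximate-match bins are spread over ECORE_MC_HASH_SIZE 32-bit
 * words: (bit >> 5) picks the word and (bit & 0x1f) the bit inside it, so
 * e.g. bin 70 sets bit 6 of word 2 of the filter written to TSTORM RAM.
 */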
2926 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2927 struct ecore_mcast_obj *o,
2928 struct ecore_mcast_ramrod_params *p,
2929 uint32_t * mc_filter)
2931 struct ecore_mcast_list_elem *mlist_pos;
2934 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2935 struct ecore_mcast_list_elem) {
2936 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2937 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2940 (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2941 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2942 mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2945 /* bookkeeping... */
2946 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2950 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2952 struct ecore_mcast_obj *o,
2953 uint32_t * mc_filter)
2957 for (bit = ecore_mcast_get_next_bin(o, 0);
2958 bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2959 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2960 ECORE_MSG(sc, "About to set bin %d", bit);
2964 /* On 57711 we write the multicast MACs' approximate match
2965 * table directly into the TSTORM's internal RAM, so we don't
2966 * really need any tricks to make it work. */
2968 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2969 struct ecore_mcast_ramrod_params *p,
2970 enum ecore_mcast_cmd cmd)
2973 struct ecore_mcast_obj *o = p->mcast_obj;
2974 struct ecore_raw_obj *r = &o->raw;
2976 /* If CLEAR_ONLY has been requested - clear the registry
2977 * and clear a pending bit.
2979 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2980 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2982 /* Set the multicast filter bits before writing it into
2983 * the internal memory.
2986 case ECORE_MCAST_CMD_ADD:
2987 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2990 case ECORE_MCAST_CMD_DEL:
2991 ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2993 /* clear the registry */
2994 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2995 sizeof(o->registry.aprox_match.vec));
2998 case ECORE_MCAST_CMD_RESTORE:
2999 ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
3003 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
3007 /* Set the mcast filter in the internal memory */
3008 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3009 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3011 /* clear the registry */
3012 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3013 sizeof(o->registry.aprox_match.vec));
3016 r->clear_pending(r);
3018 return ECORE_SUCCESS;
3021 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3023 return o->registry.aprox_match.num_bins_set;
3026 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3029 o->registry.aprox_match.num_bins_set = n;
3032 int ecore_config_mcast(struct bnx2x_softc *sc,
3033 struct ecore_mcast_ramrod_params *p,
3034 enum ecore_mcast_cmd cmd)
3036 struct ecore_mcast_obj *o = p->mcast_obj;
3037 struct ecore_raw_obj *r = &o->raw;
3038 int rc = 0, old_reg_size;
3040 /* This is needed to recover number of currently configured mcast macs
3041 * in case of failure.
3043 old_reg_size = o->get_registry_size(o);
3045 /* Do some calculations and checks */
3046 rc = o->validate(sc, p, cmd);
3050 /* Return if there is no work to do */
3051 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3052 return ECORE_SUCCESS;
3055 (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3056 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3058 /* Enqueue the current command to the pending list if we can't complete
3059 * it in the current iteration
3061 if (r->check_pending(r) ||
3062 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3063 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3067 /* As long as the current command is in a command list we
3068 * don't need to handle it separately.
3070 p->mcast_list_len = 0;
3073 if (!r->check_pending(r)) {
3075 /* Set 'pending' state */
3078 /* Configure the new classification in the chip */
3079 rc = o->config_mcast(sc, p, cmd);
3083 /* Wait for a ramrod completion if one was requested */
3084 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3085 rc = o->wait_comp(sc, o);
3091 r->clear_pending(r);
3094 o->revert(sc, p, old_reg_size);
3099 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3101 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3102 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3103 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3106 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3108 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3109 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3110 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3113 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3115 return ! !ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3118 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3120 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3123 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3124 struct ecore_mcast_obj *mcast_obj,
3125 uint8_t mcast_cl_id, uint32_t mcast_cid,
3126 uint8_t func_id, uint8_t engine_id, void *rdata,
3127 ecore_dma_addr_t rdata_mapping, int state,
3128 unsigned long *pstate, ecore_obj_type type)
3130 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3132 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3133 rdata, rdata_mapping, state, pstate, type);
3135 mcast_obj->engine_id = engine_id;
3137 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3139 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3140 mcast_obj->check_sched = ecore_mcast_check_sched;
3141 mcast_obj->set_sched = ecore_mcast_set_sched;
3142 mcast_obj->clear_sched = ecore_mcast_clear_sched;
3144 if (CHIP_IS_E1H(sc)) {
3145 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3146 mcast_obj->enqueue_cmd = NULL;
3147 mcast_obj->hdl_restore = NULL;
3148 mcast_obj->check_pending = ecore_mcast_check_pending;
3150 /* 57711 doesn't send a ramrod, so it has unlimited credit for one command. */
3153 mcast_obj->max_cmd_len = -1;
3154 mcast_obj->wait_comp = ecore_mcast_wait;
3155 mcast_obj->set_one_rule = NULL;
3156 mcast_obj->validate = ecore_mcast_validate_e1h;
3157 mcast_obj->revert = ecore_mcast_revert_e1h;
3158 mcast_obj->get_registry_size =
3159 ecore_mcast_get_registry_size_aprox;
3160 mcast_obj->set_registry_size =
3161 ecore_mcast_set_registry_size_aprox;
3163 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3164 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3165 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3166 mcast_obj->check_pending = ecore_mcast_check_pending;
3167 mcast_obj->max_cmd_len = 16;
3168 mcast_obj->wait_comp = ecore_mcast_wait;
3169 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3170 mcast_obj->validate = ecore_mcast_validate_e2;
3171 mcast_obj->revert = ecore_mcast_revert_e2;
3172 mcast_obj->get_registry_size =
3173 ecore_mcast_get_registry_size_aprox;
3174 mcast_obj->set_registry_size =
3175 ecore_mcast_set_registry_size_aprox;
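/* Note the two flavours configured above: on E1H (57711) the filter is
 * written straight to internal RAM, so there is no ramrod, no pending
 * command queue and max_cmd_len is unlimited (-1); on 57712/E2 every
 * change is sent as a multicast-rules ramrod limited to max_cmd_len (16)
 * rules, which is why the enqueue/restore/set_one_rule callbacks exist.
 */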
3179 /*************************** Credit handling **********************************/
3182 * atomic_add_ifless - add if the result is less than a given value.
3184 * @v: pointer of type ecore_atomic_t
3185 * @a: the amount to add to v...
3186 * @u: ...if (v + a) is less than u.
3188 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
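/* Both helpers below are lock-free compare-and-swap loops: read the current
 * value, bail out if the bound would be violated, then try
 * ECORE_ATOMIC_CMPXCHG(); if another context changed the counter in
 * between, the exchange returns a stale value and the loop simply retries
 * with the fresh one.
 */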
3191 static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u)
3195 c = ECORE_ATOMIC_READ(v);
3197 if (ECORE_UNLIKELY(c + a >= u))
3200 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3201 if (ECORE_LIKELY(old == c))
3210 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3212 * @v: pointer of type ecore_atomic_t
3213 * @a: the amount to subtract from v...
3214 * @u: ...if (v - a) is greater than or equal to u.
3216 * returns TRUE if (v - a) was greater than or equal to u, and FALSE otherwise. */
3219 static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u)
3223 c = ECORE_ATOMIC_READ(v);
3225 if (ECORE_UNLIKELY(c - a < u))
3228 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3229 if (ECORE_LIKELY(old == c))
3237 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3242 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3248 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3254 /* Don't allow a refill if credit + cnt > pool_sz */
3255 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
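/* The bound is pool_sz + 1 because __atomic_add_ifless() only succeeds
 * while (credit + cnt) < u, so the largest credit value it will ever
 * produce is exactly pool_sz.
 */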
3262 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3267 cur_credit = ECORE_ATOMIC_READ(&o->credit);
3272 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3273 ecore_credit_pool_obj *o,
3274 __rte_unused int cnt)
3279 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3286 /* Find "internal cam-offset" then add to base for this object... */
3287 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3289 /* Skip the current vector if there are no free entries in it */
3290 if (!o->pool_mirror[vec])
3293 /* If we've got here we are going to find a free entry */
3294 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3295 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3297 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3299 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3300 *offset = o->base_pool_offset + idx;
3308 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3311 if (offset < o->base_pool_offset)
3314 offset -= o->base_pool_offset;
3316 if (offset >= o->pool_sz)
3319 /* Return the entry to the pool */
3320 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3325 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3326 ecore_credit_pool_obj *o,
3327 __rte_unused int offset)
3332 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3333 ecore_credit_pool_obj *o,
3334 __rte_unused int *offset)
3341 * ecore_init_credit_pool - initialize credit pool internals.
3344 * @base: Base entry in the CAM to use.
3345 * @credit: pool size.
3347 * If base is negative, no CAM entries handling will be performed.
3348 * If credit is negative, pool operations will always succeed (unlimited pool).
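/* For example (both cases appear below): ecore_init_credit_pool(p, -1,
 * cam_sz) gives credit accounting without per-entry CAM bookkeeping, as
 * used for the 57712 MAC pool, while ecore_init_credit_pool(p, 0, -1)
 * gives an always-succeeding pool, as used for the E1x VLAN "pool".
 */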
3351 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3352 int base, int credit)
3354 /* Zero the object first */
3355 ECORE_MEMSET(p, 0, sizeof(*p));
3357 /* Set the table to all 1s */
3358 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3360 /* Init a pool as full */
3361 ECORE_ATOMIC_SET(&p->credit, credit);
3363 /* The total pool size */
3364 p->pool_sz = credit;
3366 p->base_pool_offset = base;
3368 /* Commit the change */
3371 p->check = ecore_credit_pool_check;
3373 /* if pool credit is negative - disable the checks */
3375 p->put = ecore_credit_pool_put;
3376 p->get = ecore_credit_pool_get;
3377 p->put_entry = ecore_credit_pool_put_entry;
3378 p->get_entry = ecore_credit_pool_get_entry;
3380 p->put = ecore_credit_pool_always_TRUE;
3381 p->get = ecore_credit_pool_always_TRUE;
3382 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3383 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3386 /* If base is negative - disable entries handling */
3388 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3389 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3393 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3394 struct ecore_credit_pool_obj *p,
3395 uint8_t func_id, uint8_t func_num)
3398 #define ECORE_CAM_SIZE_EMUL 5
3402 if (CHIP_IS_E1H(sc)) {
3403 /* CAM credit is equally divided between all active functions on a per-port basis. */
3407 if (!CHIP_REV_IS_SLOW(sc))
3408 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3410 cam_sz = ECORE_CAM_SIZE_EMUL;
3411 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3413 /* this should never happen! Block MAC operations. */
3414 ecore_init_credit_pool(p, 0, 0);
3420 /* CAM credit is equally divided between all active functions on the PATH. */
3424 if (!CHIP_REV_IS_SLOW(sc))
3425 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3427 cam_sz = ECORE_CAM_SIZE_EMUL;
3429 /* No need for CAM entries handling for 57712 and newer. */
3432 ecore_init_credit_pool(p, -1, cam_sz);
3434 /* this should never happen! Block MAC operations. */
3435 ecore_init_credit_pool(p, 0, 0);
3440 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3441 struct ecore_credit_pool_obj *p,
3442 uint8_t func_id, uint8_t func_num)
3444 if (CHIP_IS_E1x(sc)) {
3445 /* There is no VLAN credit in HW on 57711; only
3446 * MAC / MAC-VLAN can be set. */
3448 ecore_init_credit_pool(p, 0, -1);
3450 /* CAM credit is equally divided between all active functions on the PATH. */
3454 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3455 ecore_init_credit_pool(p, func_id * credit, credit);
3457 /* this should never happen! Block VLAN operations. */
3458 ecore_init_credit_pool(p, 0, 0);
3462 /****************** RSS Configuration ******************/
3465 * ecore_setup_rss - configure RSS
3467 * @sc: device handle
3468 * @p: rss configuration
3470 * Sends an UPDATE ramrod to apply the configuration. */
3472 static int ecore_setup_rss(struct bnx2x_softc *sc,
3473 struct ecore_config_rss_params *p)
3475 struct ecore_rss_config_obj *o = p->rss_obj;
3476 struct ecore_raw_obj *r = &o->raw;
3477 struct eth_rss_update_ramrod_data *data =
3478 (struct eth_rss_update_ramrod_data *)(r->rdata);
3479 uint8_t rss_mode = 0;
3482 ECORE_MEMSET(data, 0, sizeof(*data));
3484 ECORE_MSG(sc, "Configuring RSS");
3486 /* Set an echo field */
3487 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3488 (r->state << ECORE_SWCID_SHIFT));
3491 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3492 rss_mode = ETH_RSS_MODE_DISABLED;
3493 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3494 rss_mode = ETH_RSS_MODE_REGULAR;
3496 data->rss_mode = rss_mode;
3498 ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3500 /* RSS capabilities */
3501 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3502 data->capabilities |=
3503 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3505 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3506 data->capabilities |=
3507 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3509 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3510 data->capabilities |=
3511 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3513 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3514 data->capabilities |=
3515 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3517 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3518 data->capabilities |=
3519 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3521 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3522 data->capabilities |=
3523 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3525 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3526 data->udp_4tuple_dst_port_mask =
3527 ECORE_CPU_TO_LE16(p->tunnel_mask);
3528 data->udp_4tuple_dst_port_value =
3529 ECORE_CPU_TO_LE16(p->tunnel_value);
3533 data->rss_result_mask = p->rss_result_mask;
3536 data->rss_engine_id = o->engine_id;
3538 ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3540 /* Indirection table */
3541 ECORE_MEMCPY(data->indirection_table, p->ind_table,
3542 T_ETH_INDIRECTION_TABLE_SIZE);
3544 /* Remember the last configuration */
3545 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3548 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3549 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3550 sizeof(data->rss_key));
3551 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3554 /* No explicit memory barrier is needed here: the ordering of the
3555 * write to the SPQ element and the update of the SPQ producer
3556 * (which involves a memory read) is guaranteed by the full
3557 * memory barrier inside ecore_sp_post().
3558 */
3562 rc = ecore_sp_post(sc,
3563 RAMROD_CMD_ID_ETH_RSS_UPDATE,
3564 r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3569 return ECORE_PENDING;
3572 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3575 struct ecore_rss_config_obj *o = p->rss_obj;
3576 struct ecore_raw_obj *r = &o->raw;
3578 /* Do nothing if only driver cleanup was requested */
3579 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3580 return ECORE_SUCCESS;
3584 rc = o->config_rss(sc, p);
3586 r->clear_pending(r);
3590 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3591 rc = r->wait_comp(sc, r);
3596 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3597 uint8_t cl_id, uint32_t cid, uint8_t func_id,
3598 uint8_t engine_id, void *rdata,
3599 ecore_dma_addr_t rdata_mapping, int state,
3600 unsigned long *pstate, ecore_obj_type type)
3602 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3603 rdata_mapping, state, pstate, type);
3605 rss_obj->engine_id = engine_id;
3606 rss_obj->config_rss = ecore_setup_rss;
3609 /********************** Queue state object ***********************************/
3612 * ecore_queue_state_change - perform Queue state change transition
3614 * @sc: device handle
3615 * @params: parameters to perform the transition
3617 * returns 0 in case of successfully completed transition, negative error
3618 * code in case of failure, positive (EBUSY) value if there is a completion
3619 * that is still pending (possible only if RAMROD_COMP_WAIT is
3620 * not set in params->ramrod_flags for asynchronous commands).
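/* Typical (illustrative) call pattern, assuming the caller owns a
 * struct ecore_queue_state_params q_params bound to the queue object:
 *
 *	q_params.cmd = ECORE_Q_CMD_SETUP;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = ecore_queue_state_change(sc, &q_params);
 *
 * With RAMROD_COMP_WAIT set the call blocks until the ramrod completes;
 * without it a positive return only means the completion is still pending.
 */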
3623 int ecore_queue_state_change(struct bnx2x_softc *sc,
3624 struct ecore_queue_state_params *params)
3626 struct ecore_queue_sp_obj *o = params->q_obj;
3627 int rc, pending_bit;
3628 unsigned long *pending = &o->pending;
3630 /* Check that the requested transition is legal */
3631 rc = o->check_transition(sc, o, params);
3633 PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
3638 /* Set "pending" bit */
3639 ECORE_MSG(sc, "pending bit was=%lx", o->pending);
3640 pending_bit = o->set_pending(o, params);
3641 ECORE_MSG(sc, "pending bit now=%lx", o->pending);
3643 /* Don't send a command if only driver cleanup was requested */
3644 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags))
3645 o->complete_cmd(sc, o, pending_bit);
3648 rc = o->send_cmd(sc, params);
3650 o->next_state = ECORE_Q_STATE_MAX;
3651 ECORE_CLEAR_BIT(pending_bit, pending);
3652 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3656 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
3657 rc = o->wait_comp(sc, o, pending_bit);
3661 return ECORE_SUCCESS;
3665 return ECORE_RET_PENDING(pending_bit, pending);
3668 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3669 struct ecore_queue_state_params *params)
3671 enum ecore_queue_cmd cmd = params->cmd, bit;
3673 /* ACTIVATE and DEACTIVATE commands are implemented on top of the UPDATE command. */
3676 if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3677 bit = ECORE_Q_CMD_UPDATE;
3681 ECORE_SET_BIT(bit, &obj->pending);
3685 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3686 struct ecore_queue_sp_obj *o,
3687 enum ecore_queue_cmd cmd)
3689 return ecore_state_wait(sc, cmd, &o->pending);
3693 * ecore_queue_comp_cmd - complete the state change command.
3695 * @sc: device handle
3699 * Checks that the arrived completion is expected.
3701 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3702 struct ecore_queue_sp_obj *o,
3703 enum ecore_queue_cmd cmd)
3705 unsigned long cur_pending = o->pending;
3707 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3708 PMD_DRV_LOG(ERR, sc,
3709 "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3710 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3711 cur_pending, o->next_state);
3715 if (o->next_tx_only >= o->max_cos)
3716 /* >= because tx only must always be smaller than cos since the
3717 * primary connection supports COS 0
3719 PMD_DRV_LOG(ERR, sc,
3720 "illegal value for next tx_only: %d. max cos was %d",
3721 o->next_tx_only, o->max_cos);
3723 ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3724 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3726 if (o->next_tx_only) /* print num tx-only if any exist */
3727 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3728 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3730 o->state = o->next_state;
3731 o->num_tx_only = o->next_tx_only;
3732 o->next_state = ECORE_Q_STATE_MAX;
3734 /* It's important that o->state and o->next_state are
3735 * updated before o->pending.
3739 ECORE_CLEAR_BIT(cmd, &o->pending);
3740 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3742 return ECORE_SUCCESS;
3745 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3747 struct client_init_ramrod_data *data)
3749 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3753 /* IPv6 TPA supported for E2 and above only */
3754 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3755 &params->flags) *
3756 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3759 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3760 struct ecore_queue_sp_obj *o,
3761 struct ecore_general_setup_params
3762 *params, struct client_init_general_data
3763 *gen_data, unsigned long *flags)
3765 gen_data->client_id = o->cl_id;
3767 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3768 gen_data->statistics_counter_id = params->stat_id;
3769 gen_data->statistics_en_flg = 1;
3770 gen_data->statistics_zero_flg =
3771 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3773 gen_data->statistics_counter_id =
3774 DISABLE_STATISTIC_COUNTER_ID_VALUE;
3776 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3777 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3778 gen_data->sp_client_id = params->spcl_id;
3779 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3780 gen_data->func_id = o->func_id;
3782 gen_data->cos = params->cos;
3784 gen_data->traffic_type =
3785 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3786 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3788 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3789 gen_data->activate_flg, gen_data->cos,
3790 gen_data->statistics_en_flg);
3793 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3794 struct client_init_tx_data *tx_data,
3795 unsigned long *flags)
3797 tx_data->enforce_security_flg =
3798 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3799 tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3800 tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3801 tx_data->tx_switching_flg =
3802 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3803 tx_data->anti_spoofing_flg =
3804 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3805 tx_data->force_default_pri_flg =
3806 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3807 tx_data->refuse_outband_vlan_flg =
3808 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3809 tx_data->tunnel_non_lso_pcsum_location =
3810 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3813 tx_data->tx_status_block_id = params->fw_sb_id;
3814 tx_data->tx_sb_index_number = params->sb_cq_index;
3815 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3817 tx_data->tx_bd_page_base.lo =
3818 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3819 tx_data->tx_bd_page_base.hi =
3820 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3822 /* Don't configure any Tx switching mode during queue SETUP */
3826 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3827 struct client_init_rx_data *rx_data)
3829 /* flow control data */
3830 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3831 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3832 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3833 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3834 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3835 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3836 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3839 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3840 struct client_init_rx_data *rx_data,
3841 unsigned long *flags)
3843 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3844 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3845 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3846 CLIENT_INIT_RX_DATA_TPA_MODE;
3847 rx_data->vmqueue_mode_en_flg = 0;
3849 rx_data->extra_data_over_sgl_en_flg =
3850 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3851 rx_data->cache_line_alignment_log_size = params->cache_line_log;
3852 rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3853 rx_data->client_qzone_id = params->cl_qzone_id;
3854 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3856 /* Always start in DROP_ALL mode */
3857 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3858 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
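/* The queue therefore comes up dropping all unicast and multicast traffic;
 * it is only opened later when the rx-mode filter rules are sent (see the
 * ecore_rx_mode_set_cmd_state_e2() / ecore_set_rx_mode_*() path above).
 */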
3860 /* We don't set drop flags */
3861 rx_data->drop_ip_cs_err_flg = 0;
3862 rx_data->drop_tcp_cs_err_flg = 0;
3863 rx_data->drop_ttl0_flg = 0;
3864 rx_data->drop_udp_cs_err_flg = 0;
3865 rx_data->inner_vlan_removal_enable_flg =
3866 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3867 rx_data->outer_vlan_removal_enable_flg =
3868 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3869 rx_data->status_block_id = params->fw_sb_id;
3870 rx_data->rx_sb_index_number = params->sb_cq_index;
3871 rx_data->max_tpa_queues = params->max_tpa_queues;
3872 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3873 rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3874 rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3875 rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3876 rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3877 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3880 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3881 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3882 rx_data->is_approx_mcast = 1;
3885 rx_data->rss_engine_id = params->rss_engine_id;
3887 /* silent vlan removal */
3888 rx_data->silent_vlan_removal_flg =
3889 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3890 rx_data->silent_vlan_value =
3891 ECORE_CPU_TO_LE16(params->silent_removal_value);
3892 rx_data->silent_vlan_mask =
3893 ECORE_CPU_TO_LE16(params->silent_removal_mask);
3896 /* initialize the general, tx and rx parts of a queue object */
3897 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3899 struct client_init_ramrod_data *data)
3901 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3902 &cmd_params->params.setup.gen_params,
3904 &cmd_params->params.setup.flags);
3906 ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3907 &data->tx, &cmd_params->params.setup.flags);
3909 ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3910 &data->rx, &cmd_params->params.setup.flags);
3912 ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3916 /* initialize the general and tx parts of a tx-only queue object */
3917 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3919 struct tx_queue_init_ramrod_data *data)
3921 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3922 &cmd_params->params.tx_only.gen_params,
3924 &cmd_params->params.tx_only.flags);
3926 ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3927 &data->tx, &cmd_params->params.tx_only.flags);
3929 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3930 cmd_params->q_obj->cids[0],
3931 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3935 * ecore_q_init - init HW/FW queue
3937 * @sc: device handle
3940 * HW/FW initial Queue configuration:
3942 * - CDU context validation
3945 static int ecore_q_init(struct bnx2x_softc *sc,
3946 struct ecore_queue_state_params *params)
3948 struct ecore_queue_sp_obj *o = params->q_obj;
3949 struct ecore_queue_init_params *init = ¶ms->params.init;
3953 /* Tx HC configuration */
3954 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3955 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3956 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3958 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3959 init->tx.sb_cq_index,
3962 &init->tx.flags), hc_usec);
3965 /* Rx HC configuration */
3966 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3967 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3968 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3970 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3971 init->rx.sb_cq_index,
3974 &init->rx.flags), hc_usec);
3977 /* Set CDU context validation values */
3978 for (cos = 0; cos < o->max_cos; cos++) {
3979 ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
3981 ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3982 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3985 /* As no ramrod is sent, complete the command immediately */
3986 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3991 return ECORE_SUCCESS;
3994 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3997 struct ecore_queue_sp_obj *o = params->q_obj;
3998 struct client_init_ramrod_data *rdata =
3999 (struct client_init_ramrod_data *)o->rdata;
4000 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4001 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4003 /* Clear the ramrod data */
4004 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4006 /* Fill the ramrod data */
4007 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4009 /* No explicit memory barrier is needed here: the ordering of the
4010 * write to the SPQ element and the update of the SPQ producer
4011 * (which involves a memory read) is guaranteed by the full
4012 * memory barrier inside ecore_sp_post().
4013 */
4016 return ecore_sp_post(sc,
4018 o->cids[ECORE_PRIMARY_CID_INDEX],
4019 data_mapping, ETH_CONNECTION_TYPE);
4022 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4023 struct ecore_queue_state_params *params)
4025 struct ecore_queue_sp_obj *o = params->q_obj;
4026 struct client_init_ramrod_data *rdata =
4027 (struct client_init_ramrod_data *)o->rdata;
4028 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4029 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4031 /* Clear the ramrod data */
4032 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4034 /* Fill the ramrod data */
4035 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4036 ecore_q_fill_setup_data_e2(params, rdata);
4038 /* No explicit memory barrier is needed here: the ordering of the
4039 * write to the SPQ element and the update of the SPQ producer
4040 * (which involves a memory read) is guaranteed by the full
4041 * memory barrier inside ecore_sp_post().
4042 */
4045 return ecore_sp_post(sc,
4047 o->cids[ECORE_PRIMARY_CID_INDEX],
4048 data_mapping, ETH_CONNECTION_TYPE);
4051 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4054 struct ecore_queue_sp_obj *o = params->q_obj;
4055 struct tx_queue_init_ramrod_data *rdata =
4056 (struct tx_queue_init_ramrod_data *)o->rdata;
4057 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4058 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4059 struct ecore_queue_setup_tx_only_params *tx_only_params =
4060 ¶ms->params.tx_only;
4061 uint8_t cid_index = tx_only_params->cid_index;
4063 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4064 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4065 ECORE_MSG(sc, "sending forward tx-only ramrod");
4066 }
4067 if (cid_index >= o->max_cos) {
4068 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4069 o->cl_id, cid_index);
4073 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4074 tx_only_params->gen_params.cos,
4075 tx_only_params->gen_params.spcl_id);
4077 /* Clear the ramrod data */
4078 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4080 /* Fill the ramrod data */
4081 ecore_q_fill_setup_tx_only(sc, params, rdata);
4084 (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4085 o->cids[cid_index], rdata->general.client_id,
4086 rdata->general.sp_client_id, rdata->general.cos);
4088 /* No explicit memory barrier is needed here: the ordering of the
4089 * write to the SPQ element and the update of the SPQ producer
4090 * (which involves a memory read) is guaranteed by the full
4091 * memory barrier inside ecore_sp_post().
4092 */
4095 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4096 data_mapping, ETH_CONNECTION_TYPE);
4099 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4100 struct ecore_queue_update_params *params,
4101 struct client_update_ramrod_data *data)
4103 /* Client ID of the client to update */
4104 data->client_id = obj->cl_id;
4106 /* Function ID of the client to update */
4107 data->func_id = obj->func_id;
4109 /* Default VLAN value */
4110 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4112 /* Inner VLAN stripping */
4113 data->inner_vlan_removal_enable_flg =
4114 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags);
4115 data->inner_vlan_removal_change_flg =
4116 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4117 ¶ms->update_flags);
4119 /* Outer VLAN stripping */
4120 data->outer_vlan_removal_enable_flg =
4121 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags);
4122 data->outer_vlan_removal_change_flg =
4123 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4124 ¶ms->update_flags);
4126 /* Drop packets that have a source MAC that doesn't belong to this queue. */
4129 data->anti_spoofing_enable_flg =
4130 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags);
4131 data->anti_spoofing_change_flg =
4132 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4133 ¶ms->update_flags);
4135 /* Activate/Deactivate */
4136 data->activate_flg =
4137 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, ¶ms->update_flags);
4138 data->activate_change_flg =
4139 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags);
4141 /* Enable default VLAN */
4142 data->default_vlan_enable_flg =
4143 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags);
4144 data->default_vlan_change_flg =
4145 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4146 ¶ms->update_flags);
4148 /* silent vlan removal */
4149 data->silent_vlan_change_flg =
4150 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4151 ¶ms->update_flags);
4152 data->silent_vlan_removal_flg =
4153 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4154 ¶ms->update_flags);
4155 data->silent_vlan_value =
4156 ECORE_CPU_TO_LE16(params->silent_removal_value);
4157 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4160 data->tx_switching_flg =
4161 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags);
4162 data->tx_switching_change_flg =
4163 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4164 ¶ms->update_flags);
4167 static int ecore_q_send_update(struct bnx2x_softc *sc,
4168 struct ecore_queue_state_params *params)
4170 struct ecore_queue_sp_obj *o = params->q_obj;
4171 struct client_update_ramrod_data *rdata =
4172 (struct client_update_ramrod_data *)o->rdata;
4173 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4174 struct ecore_queue_update_params *update_params =
4175 ¶ms->params.update;
4176 uint8_t cid_index = update_params->cid_index;
4178 if (cid_index >= o->max_cos) {
4179 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4180 o->cl_id, cid_index);
4184 /* Clear the ramrod data */
4185 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4187 /* Fill the ramrod data */
4188 ecore_q_fill_update_data(o, update_params, rdata);
4190 /* No explicit memory barrier is needed here: the ordering of the
4191 * write to the SPQ element and the update of the SPQ producer
4192 * (which involves a memory read) is guaranteed by the full
4193 * memory barrier inside ecore_sp_post().
4194 */
4197 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4198 o->cids[cid_index], data_mapping,
4199 ETH_CONNECTION_TYPE);
4203 * ecore_q_send_deactivate - send DEACTIVATE command
4205 * @sc: device handle
4208 * implemented using the UPDATE command.
4210 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4213 struct ecore_queue_update_params *update = ¶ms->params.update;
4215 ECORE_MEMSET(update, 0, sizeof(*update));
4217 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4219 return ecore_q_send_update(sc, params);
4223 * ecore_q_send_activate - send ACTIVATE command
4225 * @sc: device handle
4228 * implemented using the UPDATE command.
4230 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4231 struct ecore_queue_state_params *params)
4233 struct ecore_queue_update_params *update = ¶ms->params.update;
4235 ECORE_MEMSET(update, 0, sizeof(*update));
4237 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4238 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4240 return ecore_q_send_update(sc, params);
4243 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4245 __rte_unused struct ecore_queue_state_params *params)
4247 /* Not implemented yet. */
4251 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4252 struct ecore_queue_state_params *params)
4254 struct ecore_queue_sp_obj *o = params->q_obj;
4256 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4257 ecore_dma_addr_t data_mapping = 0;
4258 data_mapping = (ecore_dma_addr_t) o->cl_id;
4260 return ecore_sp_post(sc,
4261 RAMROD_CMD_ID_ETH_HALT,
4262 o->cids[ECORE_PRIMARY_CID_INDEX],
4263 data_mapping, ETH_CONNECTION_TYPE);
4266 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4267 struct ecore_queue_state_params *params)
4269 struct ecore_queue_sp_obj *o = params->q_obj;
4270 uint8_t cid_idx = params->params.cfc_del.cid_index;
4272 if (cid_idx >= o->max_cos) {
4273 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4278 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4279 o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4282 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4285 struct ecore_queue_sp_obj *o = params->q_obj;
4286 uint8_t cid_index = params->params.terminate.cid_index;
4288 if (cid_index >= o->max_cos) {
4289 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4290 o->cl_id, cid_index);
4294 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4295 o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4298 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4299 struct ecore_queue_state_params *params)
4301 struct ecore_queue_sp_obj *o = params->q_obj;
4303 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4304 o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4305 ETH_CONNECTION_TYPE);
4308 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4311 switch (params->cmd) {
4312 case ECORE_Q_CMD_INIT:
4313 return ecore_q_init(sc, params);
4314 case ECORE_Q_CMD_SETUP_TX_ONLY:
4315 return ecore_q_send_setup_tx_only(sc, params);
4316 case ECORE_Q_CMD_DEACTIVATE:
4317 return ecore_q_send_deactivate(sc, params);
4318 case ECORE_Q_CMD_ACTIVATE:
4319 return ecore_q_send_activate(sc, params);
4320 case ECORE_Q_CMD_UPDATE:
4321 return ecore_q_send_update(sc, params);
4322 case ECORE_Q_CMD_UPDATE_TPA:
4323 return ecore_q_send_update_tpa(sc, params);
4324 case ECORE_Q_CMD_HALT:
4325 return ecore_q_send_halt(sc, params);
4326 case ECORE_Q_CMD_CFC_DEL:
4327 return ecore_q_send_cfc_del(sc, params);
4328 case ECORE_Q_CMD_TERMINATE:
4329 return ecore_q_send_terminate(sc, params);
4330 case ECORE_Q_CMD_EMPTY:
4331 return ecore_q_send_empty(sc, params);
4333 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4338 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4339 struct ecore_queue_state_params *params)
4341 switch (params->cmd) {
4342 case ECORE_Q_CMD_SETUP:
4343 return ecore_q_send_setup_e1x(sc, params);
4344 case ECORE_Q_CMD_INIT:
4345 case ECORE_Q_CMD_SETUP_TX_ONLY:
4346 case ECORE_Q_CMD_DEACTIVATE:
4347 case ECORE_Q_CMD_ACTIVATE:
4348 case ECORE_Q_CMD_UPDATE:
4349 case ECORE_Q_CMD_UPDATE_TPA:
4350 case ECORE_Q_CMD_HALT:
4351 case ECORE_Q_CMD_CFC_DEL:
4352 case ECORE_Q_CMD_TERMINATE:
4353 case ECORE_Q_CMD_EMPTY:
4354 return ecore_queue_send_cmd_cmn(sc, params);
4356 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4361 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4362 struct ecore_queue_state_params *params)
4364 switch (params->cmd) {
4365 case ECORE_Q_CMD_SETUP:
4366 return ecore_q_send_setup_e2(sc, params);
4367 case ECORE_Q_CMD_INIT:
4368 case ECORE_Q_CMD_SETUP_TX_ONLY:
4369 case ECORE_Q_CMD_DEACTIVATE:
4370 case ECORE_Q_CMD_ACTIVATE:
4371 case ECORE_Q_CMD_UPDATE:
4372 case ECORE_Q_CMD_UPDATE_TPA:
4373 case ECORE_Q_CMD_HALT:
4374 case ECORE_Q_CMD_CFC_DEL:
4375 case ECORE_Q_CMD_TERMINATE:
4376 case ECORE_Q_CMD_EMPTY:
4377 return ecore_queue_send_cmd_cmn(sc, params);
4379 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4385 * ecore_queue_chk_transition - check state machine of a regular Queue
4387 * @sc: device handle
4392 * It both checks if the requested command is legal in a current
4393 * state and, if it's legal, sets a `next_state' in the object
4394 * that will be used in the completion flow to set the `state'
4397 * returns 0 if a requested command is a legal transition,
4398 * ECORE_INVAL otherwise.
4400 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4401 struct ecore_queue_sp_obj *o,
4402 struct ecore_queue_state_params *params)
4404 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4405 enum ecore_queue_cmd cmd = params->cmd;
4406 struct ecore_queue_update_params *update_params =
4407 &params->params.update;
4408 uint8_t next_tx_only = o->num_tx_only;
4410 /* Forget all pending-for-completion commands if a driver-only state
4411 * transition has been requested.
4413 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4415 o->next_state = ECORE_Q_STATE_MAX;
4418 /* Don't allow a next state transition if we are in the middle of
4422 PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
4428 case ECORE_Q_STATE_RESET:
4429 if (cmd == ECORE_Q_CMD_INIT)
4430 next_state = ECORE_Q_STATE_INITIALIZED;
4433 case ECORE_Q_STATE_INITIALIZED:
4434 if (cmd == ECORE_Q_CMD_SETUP) {
4435 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4436 &params->params.setup.flags))
4437 next_state = ECORE_Q_STATE_ACTIVE;
4439 next_state = ECORE_Q_STATE_INACTIVE;
4443 case ECORE_Q_STATE_ACTIVE:
4444 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4445 next_state = ECORE_Q_STATE_INACTIVE;
4447 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4448 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4449 next_state = ECORE_Q_STATE_ACTIVE;
4451 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4452 next_state = ECORE_Q_STATE_MULTI_COS;
4456 else if (cmd == ECORE_Q_CMD_HALT)
4457 next_state = ECORE_Q_STATE_STOPPED;
4459 else if (cmd == ECORE_Q_CMD_UPDATE) {
4460 /* If "active" state change is requested, update the
4461 * state accordingly.
4463 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4464 &update_params->update_flags) &&
4465 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4466 &update_params->update_flags))
4467 next_state = ECORE_Q_STATE_INACTIVE;
4469 next_state = ECORE_Q_STATE_ACTIVE;
4473 case ECORE_Q_STATE_MULTI_COS:
4474 if (cmd == ECORE_Q_CMD_TERMINATE)
4475 next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4477 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4478 next_state = ECORE_Q_STATE_MULTI_COS;
4479 next_tx_only = o->num_tx_only + 1;
4482 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4483 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4484 next_state = ECORE_Q_STATE_MULTI_COS;
4486 else if (cmd == ECORE_Q_CMD_UPDATE) {
4487 /* If "active" state change is requested, update the
4488 * state accordingly.
4490 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4491 &update_params->update_flags) &&
4492 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4493 &update_params->update_flags))
4494 next_state = ECORE_Q_STATE_INACTIVE;
4496 next_state = ECORE_Q_STATE_MULTI_COS;
4500 case ECORE_Q_STATE_MCOS_TERMINATED:
4501 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4502 next_tx_only = o->num_tx_only - 1;
4503 if (next_tx_only == 0)
4504 next_state = ECORE_Q_STATE_ACTIVE;
4506 next_state = ECORE_Q_STATE_MULTI_COS;
4510 case ECORE_Q_STATE_INACTIVE:
4511 if (cmd == ECORE_Q_CMD_ACTIVATE)
4512 next_state = ECORE_Q_STATE_ACTIVE;
4514 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4515 (cmd == ECORE_Q_CMD_UPDATE_TPA))
4516 next_state = ECORE_Q_STATE_INACTIVE;
4518 else if (cmd == ECORE_Q_CMD_HALT)
4519 next_state = ECORE_Q_STATE_STOPPED;
4521 else if (cmd == ECORE_Q_CMD_UPDATE) {
4522 /* If "active" state change is requested, update the
4523 * state accordingly.
4525 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4526 &update_params->update_flags) &&
4527 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4528 &update_params->update_flags)) {
4529 if (o->num_tx_only == 0)
4530 next_state = ECORE_Q_STATE_ACTIVE;
4531 else /* tx only queues exist for this queue */
4532 next_state = ECORE_Q_STATE_MULTI_COS;
4534 next_state = ECORE_Q_STATE_INACTIVE;
4538 case ECORE_Q_STATE_STOPPED:
4539 if (cmd == ECORE_Q_CMD_TERMINATE)
4540 next_state = ECORE_Q_STATE_TERMINATED;
4543 case ECORE_Q_STATE_TERMINATED:
4544 if (cmd == ECORE_Q_CMD_CFC_DEL)
4545 next_state = ECORE_Q_STATE_RESET;
4549 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4552 /* Transition is assured */
4553 if (next_state != ECORE_Q_STATE_MAX) {
4554 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4555 state, cmd, next_state);
4556 o->next_state = next_state;
4557 o->next_tx_only = next_tx_only;
4558 return ECORE_SUCCESS;
4561 ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
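/* Illustrative only: the legal single-CoS teardown order implied by the
 * switch above (derived from the code, not separate documentation). A
 * hypothetical teardown helper could issue these commands in sequence and
 * let check_transition() gate each step:
 * ACTIVE -(HALT)-> STOPPED -(TERMINATE)-> TERMINATED -(CFC_DEL)-> RESET.
 */
static const enum ecore_queue_cmd example_q_teardown_order[] __rte_unused = {
	ECORE_Q_CMD_HALT,
	ECORE_Q_CMD_TERMINATE,
	ECORE_Q_CMD_CFC_DEL,
};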
4567 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4569 * @sc: device handle
4573 * It both checks if the requested command is legal in a current
4574 * state and, if it's legal, sets a `next_state' in the object
4575 * that will be used in the completion flow to set the `state'
4578 * returns 0 if a requested command is a legal transition,
4579 * ECORE_INVAL otherwise.
4581 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4582 struct ecore_queue_sp_obj *o,
4583 struct ecore_queue_state_params
4586 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4587 enum ecore_queue_cmd cmd = params->cmd;
4590 case ECORE_Q_STATE_RESET:
4591 if (cmd == ECORE_Q_CMD_INIT)
4592 next_state = ECORE_Q_STATE_INITIALIZED;
4595 case ECORE_Q_STATE_INITIALIZED:
4596 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4597 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4598 &params->params.tx_only.flags))
4599 next_state = ECORE_Q_STATE_ACTIVE;
4601 next_state = ECORE_Q_STATE_INACTIVE;
4605 case ECORE_Q_STATE_ACTIVE:
4606 case ECORE_Q_STATE_INACTIVE:
4607 if (cmd == ECORE_Q_CMD_CFC_DEL)
4608 next_state = ECORE_Q_STATE_RESET;
4612 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4615 /* Transition is assured */
4616 if (next_state != ECORE_Q_STATE_MAX) {
4617 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4618 state, cmd, next_state);
4619 o->next_state = next_state;
4620 return ECORE_SUCCESS;
4623 ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4627 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4628 struct ecore_queue_sp_obj *obj,
4629 uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4630 uint8_t func_id, void *rdata,
4631 ecore_dma_addr_t rdata_mapping, unsigned long type)
4633 ECORE_MEMSET(obj, 0, sizeof(*obj));
4635 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4636 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4638 rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4639 obj->max_cos = cid_cnt;
4641 obj->func_id = func_id;
4643 obj->rdata_mapping = rdata_mapping;
4645 obj->next_state = ECORE_Q_STATE_MAX;
4647 if (CHIP_IS_E1x(sc))
4648 obj->send_cmd = ecore_queue_send_cmd_e1x;
4650 obj->send_cmd = ecore_queue_send_cmd_e2;
4652 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4653 obj->check_transition = ecore_queue_chk_fwd_transition;
4655 obj->check_transition = ecore_queue_chk_transition;
4657 obj->complete_cmd = ecore_queue_comp_cmd;
4658 obj->wait_comp = ecore_queue_wait_comp;
4659 obj->set_pending = ecore_queue_set_pending;
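/* Usage sketch (illustrative, not driver code): binding a queue state object
 * to a single CID and its ramrod buffer. All identifiers, and the zeroed
 * q_type (ECORE_Q_TYPE_FWD left clear so the regular transition checker is
 * used), are hypothetical placeholders; real values come from the L2 setup
 * path.
 */
static void __rte_unused example_queue_obj_setup(struct bnx2x_softc *sc,
						 struct ecore_queue_sp_obj *q,
						 uint32_t cid, uint8_t cl_id,
						 uint8_t func_id, void *rdata,
						 ecore_dma_addr_t rdata_map)
{
	uint32_t cids[1] = { cid };
	unsigned long q_type = 0;	/* regular (non-forwarding) queue */

	ecore_init_queue_obj(sc, q, cl_id, cids, 1, func_id,
			     rdata, rdata_map, q_type);
}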
4662 /********************** Function state object *********************************/
4663 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4664 struct ecore_func_sp_obj *o)
4666 /* in the middle of a transaction - return INVALID state */
4668 return ECORE_F_STATE_MAX;
4670 /* ensure the order of reading o->pending and o->state:
4671 * o->pending should be read first
4678 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4679 struct ecore_func_sp_obj *o,
4680 enum ecore_func_cmd cmd)
4682 return ecore_state_wait(sc, cmd, &o->pending);
4686 * ecore_func_state_change_comp - complete the state machine transition
4688 * @sc: device handle
4692 * Called on state change transition. Completes the state
4693 * machine transition only - no HW interaction.
4696 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4697 struct ecore_func_sp_obj *o,
4698 enum ecore_func_cmd cmd)
4700 unsigned long cur_pending = o->pending;
4702 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4703 PMD_DRV_LOG(ERR, sc,
4704 "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4705 cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4710 ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
4711 cmd, ECORE_FUNC_ID(sc), o->next_state);
4713 o->state = o->next_state;
4714 o->next_state = ECORE_F_STATE_MAX;
4716 /* It's important that o->state and o->next_state are
4717 * updated before o->pending.
4721 ECORE_CLEAR_BIT(cmd, &o->pending);
4722 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4724 return ECORE_SUCCESS;
4728 * ecore_func_comp_cmd - complete the state change command
4730 * @sc: device handle
4734 * Checks that the arrived completion is expected.
4736 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4737 struct ecore_func_sp_obj *o,
4738 enum ecore_func_cmd cmd)
4740 /* Complete the state machine part first, check if it's a
4743 int rc = ecore_func_state_change_comp(sc, o, cmd);
4748 * ecore_func_chk_transition - perform function state machine transition
4750 * @sc: device handle
4754 * It both checks if the requested command is legal in a current
4755 * state and, if it's legal, sets a `next_state' in the object
4756 * that will be used in the completion flow to set the `state'
4759 * returns 0 if a requested command is a legal transition,
4760 * ECORE_INVAL otherwise.
4762 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4763 struct ecore_func_sp_obj *o,
4764 struct ecore_func_state_params *params)
4766 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4767 enum ecore_func_cmd cmd = params->cmd;
4769 /* Forget all pending-for-completion commands if a driver-only state
4770 * transition has been requested.
4772 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4774 o->next_state = ECORE_F_STATE_MAX;
4777 /* Don't allow a next state transition if we are in the middle of
4784 case ECORE_F_STATE_RESET:
4785 if (cmd == ECORE_F_CMD_HW_INIT)
4786 next_state = ECORE_F_STATE_INITIALIZED;
4789 case ECORE_F_STATE_INITIALIZED:
4790 if (cmd == ECORE_F_CMD_START)
4791 next_state = ECORE_F_STATE_STARTED;
4793 else if (cmd == ECORE_F_CMD_HW_RESET)
4794 next_state = ECORE_F_STATE_RESET;
4797 case ECORE_F_STATE_STARTED:
4798 if (cmd == ECORE_F_CMD_STOP)
4799 next_state = ECORE_F_STATE_INITIALIZED;
4800 /* afex ramrods can be sent only in started mode, and only
4801 * if not pending for function_stop ramrod completion
4802 * for these events the next state remains STARTED.
4804 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4805 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4806 next_state = ECORE_F_STATE_STARTED;
4808 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4809 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4810 next_state = ECORE_F_STATE_STARTED;
4812 /* Switch_update ramrod can be sent in either started or
4813 * tx_stopped state, and it doesn't change the state.
4815 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4816 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4817 next_state = ECORE_F_STATE_STARTED;
4819 else if (cmd == ECORE_F_CMD_TX_STOP)
4820 next_state = ECORE_F_STATE_TX_STOPPED;
4823 case ECORE_F_STATE_TX_STOPPED:
4824 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4825 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4826 next_state = ECORE_F_STATE_TX_STOPPED;
4828 else if (cmd == ECORE_F_CMD_TX_START)
4829 next_state = ECORE_F_STATE_STARTED;
4833 PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
4836 /* Transition is assured */
4837 if (next_state != ECORE_F_STATE_MAX) {
4838 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
4839 state, cmd, next_state);
4840 o->next_state = next_state;
4841 return ECORE_SUCCESS;
4845 "Bad function state transition request: %d %d", state, cmd);
4851 * ecore_func_init_func - performs HW init at function stage
4853 * @sc: device handle
4856 * Init HW when the current phase is
4857 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4860 static int ecore_func_init_func(struct bnx2x_softc *sc,
4861 const struct ecore_func_sp_drv_ops *drv)
4863 return drv->init_hw_func(sc);
4867 * ecore_func_init_port - performs HW init at port stage
4869 * @sc: device handle
4872 * Init HW when the current phase is
4873 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4874 * FUNCTION-only HW blocks.
4877 static int ecore_func_init_port(struct bnx2x_softc *sc,
4878 const struct ecore_func_sp_drv_ops *drv)
4880 int rc = drv->init_hw_port(sc);
4884 return ecore_func_init_func(sc, drv);
4888 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4890 * @sc: device handle
4893 * Init HW when the current phase is
4894 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4895 * PORT-only and FUNCTION-only HW blocks.
4897 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4900 int rc = drv->init_hw_cmn_chip(sc);
4904 return ecore_func_init_port(sc, drv);
4908 * ecore_func_init_cmn - performs HW init at common stage
4910 * @sc: device handle
4913 * Init HW when the current phase is
4914 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4915 * PORT-only and FUNCTION-only HW blocks.
4917 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4918 const struct ecore_func_sp_drv_ops *drv)
4920 int rc = drv->init_hw_cmn(sc);
4924 return ecore_func_init_port(sc, drv);
4927 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4928 struct ecore_func_state_params *params)
4930 uint32_t load_code = params->params.hw_init.load_phase;
4931 struct ecore_func_sp_obj *o = params->f_obj;
4932 const struct ecore_func_sp_drv_ops *drv = o->drv;
4935 ECORE_MSG(sc, "function %d load_code %x",
4936 ECORE_ABS_FUNC_ID(sc), load_code);
4939 rc = drv->init_fw(sc);
4941 PMD_DRV_LOG(ERR, sc, "Error loading firmware");
4945 /* Handle the beginning of COMMON_XXX phases separately... */
4946 switch (load_code) {
4947 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4948 rc = ecore_func_init_cmn_chip(sc, drv);
4953 case FW_MSG_CODE_DRV_LOAD_COMMON:
4954 rc = ecore_func_init_cmn(sc, drv);
4959 case FW_MSG_CODE_DRV_LOAD_PORT:
4960 rc = ecore_func_init_port(sc, drv);
4965 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4966 rc = ecore_func_init_func(sc, drv);
4972 PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
4978 /* In case of success, complete the command immediately: no ramrods
4982 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4988 * ecore_func_reset_func - reset HW at function stage
4990 * @sc: device handle
4993 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4994 * FUNCTION-only HW blocks.
4996 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4999 drv->reset_hw_func(sc);
5003 * ecore_func_reset_port - reset HW at port stage
5005 * @sc: device handle
5008 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5009 * FUNCTION-only and PORT-only HW blocks.
5013 * It's important to call reset_port before reset_func() as the last thing
5014 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5015 * makes any DMAE transactions impossible.
5017 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5020 drv->reset_hw_port(sc);
5021 ecore_func_reset_func(sc, drv);
5025 * ecore_func_reset_cmn - reset HW at common stage
5027 * @sc: device handle
5030 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5031 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5032 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5034 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5035 const struct ecore_func_sp_drv_ops *drv)
5037 ecore_func_reset_port(sc, drv);
5038 drv->reset_hw_cmn(sc);
5041 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5042 struct ecore_func_state_params *params)
5044 uint32_t reset_phase = params->params.hw_reset.reset_phase;
5045 struct ecore_func_sp_obj *o = params->f_obj;
5046 const struct ecore_func_sp_drv_ops *drv = o->drv;
5048 ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5051 switch (reset_phase) {
5052 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5053 ecore_func_reset_cmn(sc, drv);
5055 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5056 ecore_func_reset_port(sc, drv);
5058 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5059 ecore_func_reset_func(sc, drv);
5062 PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
5067 /* Complete the command immediately: no ramrods have been sent. */
5068 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5070 return ECORE_SUCCESS;
5073 static int ecore_func_send_start(struct bnx2x_softc *sc,
5074 struct ecore_func_state_params *params)
5076 struct ecore_func_sp_obj *o = params->f_obj;
5077 struct function_start_data *rdata =
5078 (struct function_start_data *)o->rdata;
5079 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5080 struct ecore_func_start_params *start_params = &params->params.start;
5082 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5084 /* Fill the ramrod data with provided parameters */
5085 rdata->function_mode = (uint8_t) start_params->mf_mode;
5086 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5087 rdata->path_id = ECORE_PATH_ID(sc);
5088 rdata->network_cos_mode = start_params->network_cos_mode;
5089 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5090 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5093 * No need for an explicit memory barrier here as long as we would
5094 * need to ensure the ordering of writing to the SPQ element
5095 * and updating of the SPQ producer which involves a memory
5096 * read and we will have to put a full memory barrier there
5097 * (inside ecore_sp_post()).
5100 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5101 data_mapping, NONE_CONNECTION_TYPE);
5104 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5107 struct ecore_func_sp_obj *o = params->f_obj;
5108 struct function_update_data *rdata =
5109 (struct function_update_data *)o->rdata;
5110 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5111 struct ecore_func_switch_update_params *switch_update_params =
5112 &params->params.switch_update;
5114 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5116 /* Fill the ramrod data with provided parameters */
5117 rdata->tx_switch_suspend_change_flg = 1;
5118 rdata->tx_switch_suspend = switch_update_params->suspend;
5119 rdata->echo = SWITCH_UPDATE;
5121 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5122 data_mapping, NONE_CONNECTION_TYPE);
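/* Usage sketch (illustrative): requesting a SWITCH_UPDATE that suspends Tx
 * switching and waiting for its completion through the public state-change
 * entry point defined later in this file. The helper name is hypothetical.
 */
static int __rte_unused example_suspend_tx_switching(struct bnx2x_softc *sc,
						     struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params params;

	ECORE_MEMSET(&params, 0, sizeof(params));

	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_SWITCH_UPDATE;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
	params.params.switch_update.suspend = 1;

	return ecore_func_state_change(sc, &params);
}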
5125 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5128 struct ecore_func_sp_obj *o = params->f_obj;
5129 struct function_update_data *rdata =
5130 (struct function_update_data *)o->afex_rdata;
5131 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5132 struct ecore_func_afex_update_params *afex_update_params =
5133 &params->params.afex_update;
5135 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5137 /* Fill the ramrod data with provided parameters */
5138 rdata->vif_id_change_flg = 1;
5139 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5140 rdata->afex_default_vlan_change_flg = 1;
5141 rdata->afex_default_vlan =
5142 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5143 rdata->allowed_priorities_change_flg = 1;
5144 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5145 rdata->echo = AFEX_UPDATE;
5147 /* No need for an explicit memory barrier here as long as we would
5148 * need to ensure the ordering of writing to the SPQ element
5149 * and updating of the SPQ producer which involves a memory
5150 * read and we will have to put a full memory barrier there
5151 * (inside ecore_sp_post()).
5153 ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5155 rdata->afex_default_vlan, rdata->allowed_priorities);
5157 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5158 data_mapping, NONE_CONNECTION_TYPE);
5162 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5163 struct ecore_func_state_params *params)
5165 struct ecore_func_sp_obj *o = params->f_obj;
5166 struct afex_vif_list_ramrod_data *rdata =
5167 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5168 struct ecore_func_afex_viflists_params *afex_vif_params =
5169 &params->params.afex_viflists;
5170 uint64_t *p_rdata = (uint64_t *) rdata;
5172 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5174 /* Fill the ramrod data with provided parameters */
5175 rdata->vif_list_index =
5176 ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5177 rdata->func_bit_map = afex_vif_params->func_bit_map;
5178 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5179 rdata->func_to_clear = afex_vif_params->func_to_clear;
5181 /* send the sub-command type in the echo field */
5182 rdata->echo = afex_vif_params->afex_vif_list_command;
5184 /* No need for an explicit memory barrier here as long as we would
5185 * need to ensure the ordering of writing to the SPQ element
5186 * and updating of the SPQ producer which involves a memory
5187 * read and we will have to put a full memory barrier there
5188 * (inside ecore_sp_post()).
5192 (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5193 rdata->afex_vif_list_command, rdata->vif_list_index,
5194 rdata->func_bit_map, rdata->func_to_clear);
5196 /* this ramrod sends data directly and not through DMA mapping */
5197 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5198 *p_rdata, NONE_CONNECTION_TYPE);
5201 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
5202 ecore_func_state_params *params)
5204 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5205 NONE_CONNECTION_TYPE);
5208 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
5209 ecore_func_state_params *params)
5211 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5212 NONE_CONNECTION_TYPE);
5215 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
5218 struct ecore_func_sp_obj *o = params->f_obj;
5219 struct flow_control_configuration *rdata =
5220 (struct flow_control_configuration *)o->rdata;
5221 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5222 struct ecore_func_tx_start_params *tx_start_params =
5223 &params->params.tx_start;
5226 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5228 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5229 rdata->dcb_version = tx_start_params->dcb_version;
5230 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5232 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5233 rdata->traffic_type_to_priority_cos[i] =
5234 tx_start_params->traffic_type_to_priority_cos[i];
5236 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5237 data_mapping, NONE_CONNECTION_TYPE);
5240 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5241 struct ecore_func_state_params *params)
5243 switch (params->cmd) {
5244 case ECORE_F_CMD_HW_INIT:
5245 return ecore_func_hw_init(sc, params);
5246 case ECORE_F_CMD_START:
5247 return ecore_func_send_start(sc, params);
5248 case ECORE_F_CMD_STOP:
5249 return ecore_func_send_stop(sc, params);
5250 case ECORE_F_CMD_HW_RESET:
5251 return ecore_func_hw_reset(sc, params);
5252 case ECORE_F_CMD_AFEX_UPDATE:
5253 return ecore_func_send_afex_update(sc, params);
5254 case ECORE_F_CMD_AFEX_VIFLISTS:
5255 return ecore_func_send_afex_viflists(sc, params);
5256 case ECORE_F_CMD_TX_STOP:
5257 return ecore_func_send_tx_stop(sc, params);
5258 case ECORE_F_CMD_TX_START:
5259 return ecore_func_send_tx_start(sc, params);
5260 case ECORE_F_CMD_SWITCH_UPDATE:
5261 return ecore_func_send_switch_update(sc, params);
5263 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
5268 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5269 struct ecore_func_sp_obj *obj,
5270 void *rdata, ecore_dma_addr_t rdata_mapping,
5271 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5272 struct ecore_func_sp_drv_ops *drv_iface)
5274 ECORE_MEMSET(obj, 0, sizeof(*obj));
5276 ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5279 obj->rdata_mapping = rdata_mapping;
5280 obj->afex_rdata = afex_rdata;
5281 obj->afex_rdata_mapping = afex_rdata_mapping;
5282 obj->send_cmd = ecore_func_send_cmd;
5283 obj->check_transition = ecore_func_chk_transition;
5284 obj->complete_cmd = ecore_func_comp_cmd;
5285 obj->wait_comp = ecore_func_wait_comp;
5286 obj->drv = drv_iface;
5290 * ecore_func_state_change - perform Function state change transition
5292 * @sc: device handle
5293 * @params: parameters to perform the transaction
5295 * returns 0 in case of successfully completed transition,
5296 * negative error code in case of failure, positive
5297 * (EBUSY) value if there is a completion that is
5298 * still pending (possible only if RAMROD_COMP_WAIT is
5299 * not set in params->ramrod_flags for asynchronous
5302 int ecore_func_state_change(struct bnx2x_softc *sc,
5303 struct ecore_func_state_params *params)
5305 struct ecore_func_sp_obj *o = params->f_obj;
5307 enum ecore_func_cmd cmd = params->cmd;
5308 unsigned long *pending = &o->pending;
5310 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5312 /* Check that the requested transition is legal */
5313 rc = o->check_transition(sc, o, params);
5314 if ((rc == ECORE_BUSY) &&
5315 (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5316 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5317 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5319 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5320 rc = o->check_transition(sc, o, params);
5322 if (rc == ECORE_BUSY) {
5323 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5324 PMD_DRV_LOG(ERR, sc,
5325 "timeout waiting for previous ramrod completion");
5329 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5333 /* Set "pending" bit */
5334 ECORE_SET_BIT(cmd, pending);
5336 /* Don't send a command if only driver cleanup was requested */
5337 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5338 ecore_func_state_change_comp(sc, o, cmd);
5339 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5342 rc = o->send_cmd(sc, params);
5344 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5347 o->next_state = ECORE_F_STATE_MAX;
5348 ECORE_CLEAR_BIT(cmd, pending);
5349 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5353 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5354 rc = o->wait_comp(sc, o, cmd);
5358 return ECORE_SUCCESS;
5362 return ECORE_RET_PENDING(cmd, pending);
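/* Usage sketch (illustrative, not driver code): a synchronous FUNCTION_START
 * request driven through ecore_func_state_change(). The start parameters are
 * hypothetical placeholders; real values come from the driver's MF/CoS setup.
 */
static int __rte_unused example_func_start(struct bnx2x_softc *sc,
					   struct ecore_func_sp_obj *f_obj)
{
	struct ecore_func_state_params params;

	ECORE_MEMSET(&params, 0, sizeof(params));

	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_START;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	params.params.start.mf_mode = 0;	/* hypothetical: single-function mode */
	params.params.start.sd_vlan_tag = 0;
	params.params.start.network_cos_mode = 0;

	return ecore_func_state_change(sc, &params);
}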
5365 /******************************************************************************
5367 * Calculates CRC-8 on a 32-bit word value: polynomial 0-1-2-8 (x^8 + x^2 + x + 1).
5368 * Code was translated from Verilog.
5370 *****************************************************************************/
5371 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5379 /* split the data into 32 bits */
5380 for (i = 0; i < 32; i++) {
5381 D[i] = (uint8_t) (data & 1);
5385 /* split the crc into 8 bits */
5386 for (i = 0; i < 8; i++) {
5391 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5392 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5394 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5395 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5396 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5397 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5398 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5399 C[0] ^ C[1] ^ C[4] ^ C[5];
5400 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5401 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5402 C[1] ^ C[2] ^ C[5] ^ C[6];
5403 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5404 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5405 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5406 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5407 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5409 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5410 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5411 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5412 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5415 for (i = 0; i < 8; i++) {
5416 crc_res |= (NewCRC[i] << i);
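/* Worked example (illustrative): chaining ecore_calc_crc8() over two 32-bit
 * words with a zero seed, the way a caller computing the CRC-8 of 64 bits of
 * data might do it. The helper name is hypothetical.
 */
static uint8_t __rte_unused example_crc8_two_words(uint32_t lo, uint32_t hi)
{
	uint8_t crc = ecore_calc_crc8(lo, 0);

	return ecore_calc_crc8(hi, crc);
}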
5423 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5428 for (i = 0; i < 8; i++)
5429 crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);