From: Hemant Agrawal
Date: Wed, 14 Mar 2018 07:55:59 +0000 (+0530)
Subject: dpaa: prepare for 32-bit build
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=0e5607e4adab9bfbfbf1d6f25ab0e1f9d5f239a1;p=dpdk.git

dpaa: prepare for 32-bit build

This patch prepares the dpaa drivers for compilation on 32-bit machines.

Signed-off-by: Hemant Agrawal
---

diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
index bda62e0110..e6fd5f3ca0 100644
--- a/drivers/bus/dpaa/base/fman/fman.c
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -300,7 +300,7 @@ fman_if_init(const struct device_node *dpa_node)

 	_errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
 	if (_errno) {
-		FMAN_ERR(-EINVAL, "Invalid register address: %lu",
+		FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
 			regs_addr_host);
 		goto err;
 	}
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index a1ef39216f..1381da3633 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -161,7 +161,7 @@ int bman_init_ccsr(const struct device_node *node)
 			PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
 	if (bman_ccsr_map == MAP_FAILED) {
 		pr_err("Can not map BMan CCSR base Bman: "
-		       "0x%x Phys: 0x%lx size 0x%lx",
+		       "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
 		       *bman_addr, phys_addr, regs_size);
 		return -EINVAL;
 	}
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index f2bb3b1580..3535da5f7c 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -19,7 +19,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -235,7 +234,7 @@ int rte_dpaa_portal_init(void *arg)

 	BUS_INIT_FUNC_TRACE();

-	if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+	if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
 		cpu = rte_get_master_lcore();
 	/* if the core id is not supported */
 	else
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 18681cf3a3..c5191cef9c 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -84,7 +84,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

 	ctx->ctx_pool = ses->ctx_pool;
-	ctx->vtop_offset = (uint64_t) ctx
+	ctx->vtop_offset = (size_t) ctx
 			- rte_mempool_virt2iova(ctx);

 	return ctx;
@@ -97,7 +97,7 @@ dpaa_mem_vtop(void *vaddr)
 	uint64_t vaddr_64, paddr;
 	int i;

-	vaddr_64 = (uint64_t)vaddr;
+	vaddr_64 = (size_t)vaddr;
 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
 		if (vaddr_64 >= memseg[i].addr_64 &&
 		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
@@ -107,14 +107,14 @@ dpaa_mem_vtop(void *vaddr)
 			return (rte_iova_t)paddr;
 		}
 	}
-	return (rte_iova_t)(NULL);
+	return (size_t)NULL;
 }

 /* virtual address conversin when mempool support is available for ctx */
 static inline phys_addr_t
 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
 {
-	return (uint64_t)vaddr - ctx->vtop_offset;
+	return (size_t)vaddr - ctx->vtop_offset;
 }

 static inline void *
@@ -125,8 +125,8 @@ dpaa_mem_ptov(rte_iova_t paddr)
 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
 		if (paddr >= memseg[i].iova &&
-		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
-			return (void *)(memseg[i].addr_64 +
+		    paddr < memseg[i].iova + memseg[i].len)
+			return (void *)(size_t)(memseg[i].addr_64 +
 					(paddr - memseg[i].iova));
 	}
 	return NULL;
@@ -406,7 +406,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}

-		alginfo_c.key = (uint64_t)ses->cipher_key.data;
+		alginfo_c.key = (size_t)ses->cipher_key.data;
 		alginfo_c.keylen = ses->cipher_key.length;
 		alginfo_c.key_enc_flags = 0;
 		alginfo_c.key_type = RTA_DATA_IMM;
@@ -424,7 +424,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}

-		alginfo_a.key = (uint64_t)ses->auth_key.data;
+		alginfo_a.key = (size_t)ses->auth_key.data;
 		alginfo_a.keylen = ses->auth_key.length;
 		alginfo_a.key_enc_flags = 0;
 		alginfo_a.key_type = RTA_DATA_IMM;
@@ -439,7 +439,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			PMD_TX_LOG(ERR, "not supported aead alg\n");
 			return -ENOTSUP;
 		}
-		alginfo.key = (uint64_t)ses->aead_key.data;
+		alginfo.key = (size_t)ses->aead_key.data;
 		alginfo.keylen = ses->aead_key.length;
 		alginfo.key_enc_flags = 0;
 		alginfo.key_type = RTA_DATA_IMM;
@@ -463,7 +463,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}

-		alginfo_c.key = (uint64_t)ses->cipher_key.data;
+		alginfo_c.key = (size_t)ses->cipher_key.data;
 		alginfo_c.keylen = ses->cipher_key.length;
 		alginfo_c.key_enc_flags = 0;
 		alginfo_c.key_type = RTA_DATA_IMM;
@@ -474,7 +474,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 			return -ENOTSUP;
 		}

-		alginfo_a.key = (uint64_t)ses->auth_key.data;
+		alginfo_a.key = (size_t)ses->auth_key.data;
 		alginfo_a.keylen = ses->auth_key.length;
 		alginfo_a.key_enc_flags = 0;
 		alginfo_a.key_type = RTA_DATA_IMM;
@@ -493,15 +493,15 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		if (cdb->sh_desc[2] & 1)
 			alginfo_c.key_type = RTA_DATA_IMM;
 		else {
-			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
-						(void *)alginfo_c.key);
+			alginfo_c.key = (size_t)dpaa_mem_vtop(
+					(void *)(size_t)alginfo_c.key);
 			alginfo_c.key_type = RTA_DATA_PTR;
 		}
 		if (cdb->sh_desc[2] & (1<<1))
 			alginfo_a.key_type = RTA_DATA_IMM;
 		else {
-			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
-						(void *)alginfo_a.key);
+			alginfo_a.key = (size_t)dpaa_mem_vtop(
+					(void *)(size_t)alginfo_a.key);
 			alginfo_a.key_type = RTA_DATA_PTR;
 		}
 		cdb->sh_desc[0] = 0;
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0cd..7b82f4bd99 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -115,7 +115,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
 	struct bm_buffer buf;
 	int ret;

-	DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+	DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+			   addr, bp_info->bpid);
 	bm_buffer_set64(&buf, addr);

 retry:
@@ -154,8 +155,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
 		if (unlikely(!bp_info->ptov_off)) {
 			/* buffers are from single mem segment */
 			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
-				bp_info->ptov_off
-						= (uint64_t)obj_table[i] - phy;
+				bp_info->ptov_off = (size_t)obj_table[i] - phy;
 				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
 						= bp_info->ptov_off;
 			}
@@ -282,8 +282,8 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
 	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

-	DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
-			   len, total_elt_sz * mp->size);
+	DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+			   (uint64_t)len, total_elt_sz * mp->size);

 	/* Detect pool area has sufficient space for elements in this memzone */
 	if (len >= total_elt_sz * mp->size)
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 9435dd2f8a..092f326cbc 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -46,7 +46,7 @@ static inline void *
 DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
 {
 	if (bp_info->ptov_off)
-		return ((void *)(addr + bp_info->ptov_off));
+		return ((void *) (size_t)(addr + bp_info->ptov_off));
 	return rte_dpaa_mem_ptov(addr);
 }

diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 0dea8e79a9..bdb7f66021 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -59,7 +59,7 @@
 	} while (0)

 #if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
-void dpaa_display_frame(const struct qm_fd *fd)
+static void dpaa_display_frame(const struct qm_fd *fd)
 {
 	int ii;
 	char *ptr;
@@ -90,11 +90,10 @@ static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
 	/*TBD:XXX: to be implemented*/
 }

-static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
-					uint64_t fd_virt_addr)
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
 {
 	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
-	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;

 	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p",
 		    m, annot);
@@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 		prev_seg = cur_seg;
 	}

-	dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+	dpaa_eth_packet_info(first_seg, vaddr);
 	rte_pktmbuf_free_seg(temp);

 	return first_seg;
@@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 	mbuf->ol_flags = 0;
 	mbuf->next = NULL;
 	rte_mbuf_refcnt_set(mbuf, 1);
-	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

 	return mbuf;
 }
@@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
 		mbuf->ol_flags = 0;
 		mbuf->next = NULL;
 		rte_mbuf_refcnt_set(mbuf, 1);
-		dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
 	}
 }

@@ -593,7 +592,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
 static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
 {
 	int ret;
-	uint64_t buf = 0;
+	size_t buf = 0;
 	struct bm_buffer bufs;

 	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
@@ -602,10 +601,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
 		return (void *)buf;
 	}

-	DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+	DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
 		(uint64_t)bufs.addr, bufs.bpid);

-	buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+	buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
 			- bp_info->meta_data_size;
 	if (!buf)
 		goto out;
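
Every hunk above applies the same two fixes, and the short self-contained sketch below illustrates them outside the driver code. It is not part of the patch; the helper name log_buffer() and the sample values are made up. Casts between pointers and integers go through size_t (or uintptr_t), which always matches the pointer width, instead of uint64_t, and 64-bit quantities are printed with the <inttypes.h> PRIx64/PRIu64 macros instead of "%lx", because long is only 32 bits wide on a 32-bit target.

/* Illustrative sketch only, not part of the patch: log_buffer() and the
 * values in main() are hypothetical.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void log_buffer(void *vaddr, uint64_t phys_addr)
{
	/* Cast the pointer through size_t so the cast stays pointer-width
	 * on both 32-bit and 64-bit builds; (uint64_t)vaddr warns on 32-bit.
	 */
	size_t vaddr_bits = (size_t)vaddr;

	/* Print the 64-bit physical address with PRIx64; "%lx" would be
	 * wrong on 32-bit, where long is 32 bits.
	 */
	printf("virt 0x%zx phys 0x%" PRIx64 "\n", vaddr_bits, phys_addr);
}

int main(void)
{
	uint32_t word = 0xdeadbeef;

	log_buffer(&word, 0x100000000ULL);
	return 0;
}

On a 64-bit build this behaves exactly as before, since size_t and uint64_t have the same width there; on a 32-bit build the casts compile cleanly under -Werror and the format strings keep matching their arguments.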