fix various typos found by Lintian
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
index b670f87..bff8ef0 100644
@@ -11,6 +11,8 @@
 
 #include "otx_zip.h"
 
+int octtx_zip_logtype_driver;
+
 static const struct rte_compressdev_capabilities
                                octtx_zip_pmd_capabilities[] = {
        {       .algo = RTE_COMP_ALGO_DEFLATE,
@@ -28,6 +30,183 @@ static const struct rte_compressdev_capabilities
        RTE_COMP_END_OF_CAPABILITIES_LIST()
 };
 
+/*
+ * Reset the session to its default state for the next stateless operation.
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+       union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+       inst->s.bf = 1;
+       inst->s.ef = 0;
+}
+
+int
+zip_process_op(struct rte_comp_op *op,
+               struct zipvf_qp *qp,
+               struct zip_stream *zstrm)
+{
+       union zip_inst_s *inst = zstrm->inst;
+       volatile union zip_zres_s *zresult = NULL;
+
+       if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
+                       (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
+                       (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
+               op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+               ZIP_PMD_ERR("Segmented mbufs or out-of-range offsets are not supported\n");
+               return 0;
+       }
+
+       zipvf_prepare_cmd_stateless(op, zstrm);
+
+       zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+       zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+       zip_dump_instruction(inst);
+#endif
+
+       /* Submit zip command */
+       zipvf_push_command(qp, (void *)inst);
+
+       /* Busy-wait for the hardware to post a completion code (sync mode) */
+       do {
+       } while (!zresult->s.compcode);
+
+       if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+               op->status = RTE_COMP_OP_STATUS_SUCCESS;
+       } else {
+               /* Fatal error; nothing more can be done */
+               ZIP_PMD_ERR("operation failed with error code:%d\n",
+                       zresult->s.compcode);
+               if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+                       op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+               else
+                       op->status = RTE_COMP_OP_STATUS_ERROR;
+       }
+
+       ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
+
+       /* Update op stats */
+       switch (op->status) {
+       case RTE_COMP_OP_STATUS_SUCCESS:
+               op->consumed = zresult->s.totalbytesread;
+       /* Fall-through */
+       case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+               op->produced = zresult->s.totalbyteswritten;
+               break;
+       default:
+               ZIP_PMD_ERR("stats not updated for status:%d\n",
+                               op->status);
+               break;
+       }
+       /* zstream is reset irrespective of result */
+       reset_stream(zstrm);
+
+       zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+       return 0;
+}
+
+/** Parse xform parameters and set up a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+                       const struct rte_comp_xform *xform,
+                       struct zip_stream *z_stream)
+{
+       int ret;
+       union zip_inst_s *inst;
+       struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+       void *res;
+
+       /* Allocate resources required by a stream */
+       ret = rte_mempool_get_bulk(vf->zip_mp,
+                       z_stream->bufs, MAX_BUFS_PER_STREAM);
+       if (ret < 0)
+               return -1;
+
+       /* Get one command buffer from the pool and set it up */
+       inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+       res = z_stream->bufs[RES_BUF];
+
+       memset(inst->u, 0, sizeof(inst->u));
+
+       /* Set bf only for the first op of the stream */
+       inst->s.bf = 1;
+
+       if (xform->type == RTE_COMP_COMPRESS) {
+               inst->s.op = ZIP_OP_E_COMP;
+
+               switch (xform->compress.deflate.huffman) {
+               case RTE_COMP_HUFFMAN_DEFAULT:
+                       inst->s.cc = ZIP_CC_DEFAULT;
+                       break;
+               case RTE_COMP_HUFFMAN_FIXED:
+                       inst->s.cc = ZIP_CC_FIXED_HUFF;
+                       break;
+               case RTE_COMP_HUFFMAN_DYNAMIC:
+                       inst->s.cc = ZIP_CC_DYN_HUFF;
+                       break;
+               default:
+                       ret = -1;
+                       goto err;
+               }
+
+               switch (xform->compress.level) {
+               case RTE_COMP_LEVEL_MIN:
+                       inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+                       break;
+               case RTE_COMP_LEVEL_MAX:
+                       inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+                       break;
+               case RTE_COMP_LEVEL_NONE:
+                       ZIP_PMD_ERR("Compression level not supported");
+                       ret = -1;
+                       goto err;
+               default:
+                       /* For any value between min and max, choose the
+                        * PMD default.
+                        */
+                       inst->s.ss = ZIP_COMP_E_LEVEL_MED;
+                       break;
+               }
+       } else if (xform->type == RTE_COMP_DECOMPRESS) {
+               inst->s.op = ZIP_OP_E_DECOMP;
+               /* From the HRM:
+                * for DEFLATE decompression, [CC] must be 0x0;
+                * for decompression, [SS] must be 0x0.
+                */
+               inst->s.cc = 0;
+               /* Speed bit should not be set for decompression */
+               inst->s.ss = 0;
+               /* A decompression context is needed only for STATEFUL
+                * operations. Only STATELESS is currently supported,
+                * so skip setting the ctx pointer.
+                */
+
+       } else {
+               ZIP_PMD_ERR("xform type not supported\n");
+               ret = -1;
+               goto err;
+       }
+
+       inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+       inst->s.res_ptr_ctl.s.length = 0;
+
+       z_stream->inst = inst;
+       z_stream->func = zip_process_op;
+
+       return 0;
+
+err:
+       rte_mempool_put_bulk(vf->zip_mp,
+                            (void *)&(z_stream->bufs[0]),
+                            MAX_BUFS_PER_STREAM);
+
+       return ret;
+}
+
 /** Configure device */
 static int
 zip_pmd_config(struct rte_compressdev *dev,
@@ -227,7 +406,7 @@ zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 
        qp->name = name;
 
-       /* Create completion queue upto max_inflight_ops */
+       /* Create completion queue up to max_inflight_ops */
        qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
                                                max_inflight_ops, socket_id);
        if (qp->processed_pkts == NULL)
@@ -253,7 +432,110 @@ qp_setup_cleanup:
        return -1;
 }
 
-struct rte_compressdev_ops octtx_zip_pmd_ops = {
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+               const struct rte_comp_xform *xform, void **stream)
+{
+       int ret;
+       struct zip_stream *strm = NULL;
+
+       strm = rte_malloc(NULL,
+                       sizeof(struct zip_stream), 0);
+
+       if (strm == NULL)
+               return -ENOMEM;
+
+       ret = zip_set_stream_parameters(dev, xform, strm);
+       if (ret < 0) {
+               ZIP_PMD_ERR("failed to configure xform parameters");
+               rte_free(strm);
+               return ret;
+       }
+       *stream = strm;
+       return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+       struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
+       struct zip_stream *z_stream;
+
+       if (stream == NULL)
+               return 0;
+
+       z_stream = (struct zip_stream *)stream;
+
+       /* Free resources back to pool */
+       rte_mempool_put_bulk(vf->zip_mp,
+                               (void *)&(z_stream->bufs[0]),
+                               MAX_BUFS_PER_STREAM);
+
+       /* Zero out the whole structure */
+       memset(stream, 0, sizeof(struct zip_stream));
+       rte_free(stream);
+
+       return 0;
+}
+
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+               struct rte_comp_op **ops, uint16_t nb_ops)
+{
+       struct zipvf_qp *qp = queue_pair;
+       struct rte_comp_op *op;
+       struct zip_stream *zstrm;
+       int i, ret = 0;
+       uint16_t enqd = 0;
+
+       for (i = 0; i < nb_ops; i++) {
+               op = ops[i];
+
+               if (op->op_type == RTE_COMP_OP_STATEFUL) {
+                       op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+               } else {
+                       /* process stateless ops */
+                       zstrm = (struct zip_stream *)op->private_xform;
+                       if (unlikely(zstrm == NULL))
+                               op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+                       else
+                               ret = zstrm->func(op, qp, zstrm);
+               }
+
+               /* Whatever the outcome of the op, put it on the completion
+                * queue together with its status.
+                */
+               if (!ret)
+                       ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+
+               if (unlikely(ret < 0)) {
+                       /* Increment error count if op enqueue failed */
+                       qp->qp_stats.enqueue_err_count++;
+               } else {
+                       qp->qp_stats.enqueued_count++;
+                       enqd++;
+               }
+       }
+       return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+               struct rte_comp_op **ops, uint16_t nb_ops)
+{
+       struct zipvf_qp *qp = queue_pair;
+
+       unsigned int nb_dequeued = 0;
+
+       nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+                       (void **)ops, nb_ops, NULL);
+       qp->qp_stats.dequeued_count += nb_dequeued;
+
+       return nb_dequeued;
+}
+
+static struct rte_compressdev_ops octtx_zip_pmd_ops = {
                .dev_configure          = zip_pmd_config,
                .dev_start              = zip_pmd_start,
                .dev_stop               = zip_pmd_stop,
@@ -266,6 +548,11 @@ struct rte_compressdev_ops octtx_zip_pmd_ops = {
 
                .queue_pair_setup       = zip_pmd_qp_setup,
                .queue_pair_release     = zip_pmd_qp_release,
+
+               .private_xform_create   = zip_pmd_stream_create,
+               .private_xform_free     = zip_pmd_stream_free,
+               .stream_create          = NULL,
+               .stream_free            = NULL
 };
 
 static int
@@ -309,6 +596,8 @@ zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
        compressdev->dev_ops = &octtx_zip_pmd_ops;
        /* register rx/tx burst functions for data path */
+       compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+       compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
        compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
        return ret;
 }
@@ -360,10 +649,7 @@ static struct rte_pci_driver octtx_zip_pmd = {
 RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
 
-RTE_INIT(octtx_zip_init_log);
-
-static void
-octtx_zip_init_log(void)
+RTE_INIT(octtx_zip_init_log)
 {
        octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
        if (octtx_zip_logtype_driver >= 0)