/*
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This work is largely based on the "vhost-user-scsi" implementation by
 * SPDK (https://github.com/spdk/spdk).
 */
#include <assert.h>
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "vhost_scsi.h"
#include "scsi_spec.h"
/*
 * Number of bytes of standard INQUIRY data needed to fully contain
 * @field of struct scsi_cdb_inquiry_data (offset of the field plus its
 * size).  Compared against the CDB ALLOCATION LENGTH to decide how much
 * of the INQUIRY payload to populate.
 */
#define INQ_OFFSET(field) (offsetof(struct scsi_cdb_inquiry_data, field) + \
			   sizeof(((struct scsi_cdb_inquiry_data *)0x0)->field))
/*
 * Copy @src into @dst, filling exactly @size bytes.  If @src is shorter
 * than @size the remainder is filled with @pad; if it is longer it is
 * truncated.  The result is NOT NUL-terminated — it is a fixed-width
 * SCSI field, not a C string.
 */
static void
vhost_strcpy_pad(void *dst, const char *src, size_t size, int pad)
{
	size_t len;

	len = strlen(src);
	if (len < size) {
		memcpy(dst, src, len);
		memset((char *)dst + len, pad, size - len);
	} else {
		memcpy(dst, src, size);
	}
}
/*
 * Convert one hexadecimal digit to its numeric value (0-15).
 * Accepts '0'-'9' and 'a'-'f'/'A'-'F'; any other character is returned
 * unchanged (cast to int), matching the original permissive behaviour.
 */
static int
vhost_hex2bin(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	ch = tolower(ch);
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	/* Not a hex digit: pass the character through as-is. */
	return (int)ch;
}
/*
 * Build an SPC-3 "NAA IEEE Extended" designator (see spc3r23 7.6.3.6.2)
 * in the first 8 bytes of @buf, seeded from up to 16 hex digits of
 * @name.  The NAA nibble is forced to 2h and the IEEE company id to
 * 00 03 47 (Intel).
 *
 * NOTE(review): @buf typically points into a packed designator
 * descriptor, so it may be unaligned.  The original code dereferenced
 * it via *(uint64_t *) casts, which is strict-aliasing and alignment
 * UB; memcpy is used instead, producing identical bytes.
 */
static void
vhost_bdev_scsi_set_naa_ieee_extended(const char *name, uint8_t *buf)
{
	int i, value, count = 0;
	uint64_t local_value, be_value;

	/* Pack two hex digits of @name per output byte (low nibble first). */
	for (i = 0; (i < 16) && (name[i] != '\0'); i++) {
		value = vhost_hex2bin(name[i]);
		if (i % 2)
			buf[count++] |= value << 4;
		else
			buf[count] = value;
	}

	memcpy(&local_value, buf, sizeof(local_value));

	/*
	 * see spc3r23 7.6.3.6.2,
	 * NAA IEEE Extended identifier format
	 */
	local_value &= 0x0fff000000ffffffull;
	/* NAA 02, and 00 03 47 for IEEE Intel */
	local_value |= 0x2000000347000000ull;

	be_value = rte_cpu_to_be_64(local_value);
	memcpy(buf, &be_value, sizeof(be_value));
}
112 scsi_task_build_sense_data(struct vhost_scsi_task *task, int sk,
118 resp_code = 0x70; /* Current + Fixed format */
121 cp = (uint8_t *)task->resp->sense;
123 /* VALID(7) RESPONSE CODE(6-0) */
124 cp[0] = 0x80 | resp_code;
127 /* FILEMARK(7) EOM(6) ILI(5) SENSE KEY(3-0) */
130 memset(&cp[3], 0, 4);
132 /* ADDITIONAL SENSE LENGTH */
135 /* COMMAND-SPECIFIC INFORMATION */
136 memset(&cp[8], 0, 4);
137 /* ADDITIONAL SENSE CODE */
139 /* ADDITIONAL SENSE CODE QUALIFIER */
141 /* FIELD REPLACEABLE UNIT CODE */
144 /* SKSV(7) SENSE KEY SPECIFIC(6-0,7-0,7-0) */
150 task->resp->sense_len = 18;
154 scsi_task_set_status(struct vhost_scsi_task *task, int sc, int sk,
157 if (sc == SCSI_STATUS_CHECK_CONDITION)
158 scsi_task_build_sense_data(task, sk, asc, ascq);
159 task->resp->status = sc;
/*
 * Handle a SCSI INQUIRY command for the emulated block device: either a
 * VPD page (Supported Pages 00h, Unit Serial Number 80h, Device
 * Identification 83h) when EVPD is set, or standard INQUIRY data.
 * Data is written into task->iovs[0]; the function reports status via
 * scsi_task_set_status().
 *
 * NOTE(review): this fragment is missing interior lines from the
 * original source (declarations, switch/if framing, goto labels, the
 * returns).  The surviving lines are kept verbatim below — confirm
 * against the upstream file before relying on control flow.
 */
163 vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
164 struct vhost_scsi_task *task)
167 uint32_t alloc_len = 0;
175 struct scsi_cdb_inquiry *inq;
177 inq = (struct scsi_cdb_inquiry *)task->req->cdb;
/* Single data iovec expected for INQUIRY responses. */
179 assert(task->iovs_cnt == 1);
181 /* At least 36Bytes for inquiry command */
182 if (task->data_len < 0x24)
185 pd = SPC_PERIPHERAL_DEVICE_TYPE_DISK;
/* EVPD bit (CDB byte 1 bit 0) selects VPD page vs standard data. */
187 evpd = inq->evpd & 0x1;
/* ---- VPD page branch (EVPD set) ---- */
193 struct scsi_vpd_page *vpage = (struct scsi_vpd_page *)
194 task->iovs[0].iov_base;
196 /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
197 vpage->peripheral = pd;
/* Echo the requested PAGE CODE into the response header. */
199 vpage->page_code = pc;
/* Page 00h: list of supported VPD page codes. */
202 case SPC_VPD_SUPPORTED_VPD_PAGES:
204 vpage->params[0] = SPC_VPD_SUPPORTED_VPD_PAGES;
205 vpage->params[1] = SPC_VPD_UNIT_SERIAL_NUMBER;
206 vpage->params[2] = SPC_VPD_DEVICE_IDENTIFICATION;
209 vpage->alloc_len = rte_cpu_to_be_16(len);
/* Page 80h: unit serial number taken from the bdev name.
 * NOTE(review): strncpy may leave the field unterminated — this is a
 * fixed-width SCSI field, so that is intentional here.
 */
211 case SPC_VPD_UNIT_SERIAL_NUMBER:
213 strncpy((char *)vpage->params, bdev->name, 32);
214 vpage->alloc_len = rte_cpu_to_be_16(32);
/* Page 83h: three designators — NAA, T10 vendor id, SCSI name. */
216 case SPC_VPD_DEVICE_IDENTIFICATION:
218 struct scsi_desig_desc *desig;
/* Designator 1: 8-byte NAA IEEE Extended identifier (binary). */
222 desig = (struct scsi_desig_desc *)buf;
223 desig->code_set = SPC_VPD_CODE_SET_BINARY;
224 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
225 desig->type = SPC_VPD_IDENTIFIER_TYPE_NAA;
226 desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
227 desig->reserved0 = 0;
229 desig->reserved1 = 0;
231 vhost_bdev_scsi_set_naa_ieee_extended(bdev->name,
233 len = sizeof(struct scsi_desig_desc) + 8;
/* Advance past designator 1 (header + payload). */
235 buf += sizeof(struct scsi_desig_desc) + desig->len;
237 /* T10 Vendor ID designator */
238 desig = (struct scsi_desig_desc *)buf;
239 desig->code_set = SPC_VPD_CODE_SET_ASCII;
240 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
241 desig->type = SPC_VPD_IDENTIFIER_TYPE_T10_VENDOR_ID;
242 desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
243 desig->reserved0 = 0;
245 desig->reserved1 = 0;
/* Payload: 8-byte vendor + 16-byte product + 32-byte name. */
246 desig->len = 8 + 16 + 32;
247 strncpy((char *)desig->desig, "INTEL", 8);
248 vhost_strcpy_pad((char *)&desig->desig[8],
249 bdev->product_name, 16, ' ');
250 strncpy((char *)&desig->desig[24], bdev->name, 32);
251 len += sizeof(struct scsi_desig_desc) + 8 + 16 + 32;
253 buf += sizeof(struct scsi_desig_desc) + desig->len;
255 /* SCSI Device Name designator */
256 desig = (struct scsi_desig_desc *)buf;
257 desig->code_set = SPC_VPD_CODE_SET_UTF8;
258 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
259 desig->type = SPC_VPD_IDENTIFIER_TYPE_SCSI_NAME;
260 desig->association = SPC_VPD_ASSOCIATION_TARGET_DEVICE;
261 desig->reserved0 = 0;
263 desig->reserved1 = 0;
264 desig->len = snprintf((char *)desig->desig,
265 255, "%s", bdev->name);
266 len += sizeof(struct scsi_desig_desc) + desig->len;
268 buf += sizeof(struct scsi_desig_desc) + desig->len;
/* PAGE LENGTH: total bytes of designators that follow the header. */
269 vpage->alloc_len = rte_cpu_to_be_16(len);
/* ---- standard INQUIRY data branch (EVPD clear) ---- */
276 struct scsi_cdb_inquiry_data *inqdata =
277 (struct scsi_cdb_inquiry_data *)task->iovs[0].iov_base;
278 /* Standard INQUIRY data */
279 /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
280 inqdata->peripheral = pd;
286 /* See SPC3/SBC2/MMC4/SAM2 for more details */
287 inqdata->version = SPC_VERSION_SPC3;
289 /* NORMACA(5) HISUP(4) RESPONSE DATA FORMAT(3-0) */
290 /* format 2 */ /* hierarchical support */
291 inqdata->response = 2 | 1 << 4;
295 /* SCCS(7) ACC(6) TPGS(5-4) 3PC(3) PROTECT(0) */
296 /* Not support TPGS */
300 inqdata->flags2 = 0x10;
302 /* WBUS16(5) SYNC(4) LINKED(3) CMDQUE(1) VS(0) */
/* 0x2 advertises CMDQUE (command queueing). */
304 inqdata->flags3 = 0x2;
306 /* T10 VENDOR IDENTIFICATION */
307 strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
309 /* PRODUCT IDENTIFICATION */
310 snprintf((char *)inqdata->product_id,
311 RTE_DIM(inqdata->product_id), "%s",
314 /* PRODUCT REVISION LEVEL */
315 strncpy((char *)inqdata->product_rev, "0001", 4);
317 /* Standard inquiry data ends here. Only populate
318 * remaining fields if alloc_len indicates enough
/* len counts bytes after the ADDITIONAL LENGTH field (byte 4). */
321 len = INQ_OFFSET(product_rev) - 5;
323 if (alloc_len >= INQ_OFFSET(vendor)) {
324 /* Vendor specific */
325 memset(inqdata->vendor, 0x20, 20);
326 len += sizeof(inqdata->vendor);
329 if (alloc_len >= INQ_OFFSET(ius)) {
330 /* CLOCKING(3-2) QAS(1) IUS(0) */
332 len += sizeof(inqdata->ius);
335 if (alloc_len >= INQ_OFFSET(reserved)) {
337 inqdata->reserved = 0;
338 len += sizeof(inqdata->reserved);
341 /* VERSION DESCRIPTOR 1-8 */
/* 0x0960: iSCSI (no version claimed) */
342 if (alloc_len >= INQ_OFFSET(reserved) + 2) {
343 temp16 = (uint16_t *)&inqdata->desc[0];
344 *temp16 = rte_cpu_to_be_16(0x0960);
348 if (alloc_len >= INQ_OFFSET(reserved) + 4) {
349 /* SPC-3 (no version claimed) */
350 temp16 = (uint16_t *)&inqdata->desc[2];
351 *temp16 = rte_cpu_to_be_16(0x0300);
355 if (alloc_len >= INQ_OFFSET(reserved) + 6) {
356 /* SBC-2 (no version claimed) */
357 temp16 = (uint16_t *)&inqdata->desc[4];
358 *temp16 = rte_cpu_to_be_16(0x0320);
362 if (alloc_len >= INQ_OFFSET(reserved) + 8) {
363 /* SAM-2 (no version claimed) */
364 temp16 = (uint16_t *)&inqdata->desc[6];
365 *temp16 = rte_cpu_to_be_16(0x0040);
/* Zero any extra space the initiator allocated beyond desc[8]. */
369 if (alloc_len > INQ_OFFSET(reserved) + 8) {
370 i = alloc_len - (INQ_OFFSET(reserved) + 8);
373 memset(&inqdata->desc[8], 0, i);
377 /* ADDITIONAL LENGTH */
378 inqdata->add_len = len;
/* Success path. */
382 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
/* Error path (bad allocation length / page code): ILLEGAL REQUEST. */
386 scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
387 SCSI_SENSE_ILLEGAL_REQUEST,
388 SCSI_ASC_INVALID_FIELD_IN_CDB,
389 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
394 vhost_bdev_scsi_readwrite(struct vhost_block_dev *bdev,
395 struct vhost_scsi_task *task,
396 uint64_t lba, __rte_unused uint32_t xfer_len)
402 offset = lba * bdev->blocklen;
404 for (i = 0; i < task->iovs_cnt; i++) {
405 if (task->dxfer_dir == SCSI_DIR_TO_DEV)
406 memcpy(bdev->data + offset, task->iovs[i].iov_base,
407 task->iovs[i].iov_len);
409 memcpy(task->iovs[i].iov_base, bdev->data + offset,
410 task->iovs[i].iov_len);
411 offset += task->iovs[i].iov_len;
412 nbytes += task->iovs[i].iov_len;
419 vhost_bdev_scsi_process_block(struct vhost_block_dev *bdev,
420 struct vhost_scsi_task *task)
422 uint64_t lba, *temp64;
423 uint32_t xfer_len, *temp32;
425 uint8_t *cdb = (uint8_t *)task->req->cdb;
430 lba = (uint64_t)cdb[1] << 16;
431 lba |= (uint64_t)cdb[2] << 8;
432 lba |= (uint64_t)cdb[3];
436 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
440 temp32 = (uint32_t *)&cdb[2];
441 lba = rte_be_to_cpu_32(*temp32);
442 temp16 = (uint16_t *)&cdb[7];
443 xfer_len = rte_be_to_cpu_16(*temp16);
444 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
448 temp32 = (uint32_t *)&cdb[2];
449 lba = rte_be_to_cpu_32(*temp32);
450 temp32 = (uint32_t *)&cdb[6];
451 xfer_len = rte_be_to_cpu_32(*temp32);
452 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
456 temp64 = (uint64_t *)&cdb[2];
457 lba = rte_be_to_cpu_64(*temp64);
458 temp32 = (uint32_t *)&cdb[10];
459 xfer_len = rte_be_to_cpu_32(*temp32);
460 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
462 case SBC_READ_CAPACITY_10: {
465 if (bdev->blockcnt - 1 > 0xffffffffULL)
466 memset(buffer, 0xff, 4);
468 temp32 = (uint32_t *)buffer;
469 *temp32 = rte_cpu_to_be_32(bdev->blockcnt - 1);
471 temp32 = (uint32_t *)&buffer[4];
472 *temp32 = rte_cpu_to_be_32(bdev->blocklen);
473 memcpy(task->iovs[0].iov_base, buffer, sizeof(buffer));
474 task->resp->status = SCSI_STATUS_GOOD;
475 return sizeof(buffer);
478 case SBC_SYNCHRONIZE_CACHE_10:
479 case SBC_SYNCHRONIZE_CACHE_16:
480 task->resp->status = SCSI_STATUS_GOOD;
484 scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
485 SCSI_SENSE_ILLEGAL_REQUEST,
486 SCSI_ASC_INVALID_FIELD_IN_CDB,
487 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
492 vhost_bdev_process_scsi_commands(struct vhost_block_dev *bdev,
493 struct vhost_scsi_task *task)
497 uint64_t *temp64, fmt_lun = 0;
500 uint8_t *cdb = (uint8_t *)task->req->cdb;
502 lun = (const uint8_t *)task->req->lun;
503 /* only 1 LUN supported */
504 if (lun[0] != 1 || lun[1] >= 1)
509 len = vhost_bdev_scsi_inquiry_command(bdev, task);
510 task->data_len = len;
512 case SPC_REPORT_LUNS:
513 data = (uint8_t *)task->iovs[0].iov_base;
514 fmt_lun |= (0x0ULL & 0x00ffULL) << 48;
515 temp64 = (uint64_t *)&data[8];
516 *temp64 = rte_cpu_to_be_64(fmt_lun);
517 temp32 = (uint32_t *)data;
518 *temp32 = rte_cpu_to_be_32(8);
520 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
522 case SPC_MODE_SELECT_6:
523 case SPC_MODE_SELECT_10:
524 /* don't support it now */
525 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
527 case SPC_MODE_SENSE_6:
528 case SPC_MODE_SENSE_10:
529 /* don't support it now */
530 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
532 case SPC_TEST_UNIT_READY:
533 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
536 len = vhost_bdev_scsi_process_block(bdev, task);
537 task->data_len = len;