/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This work is largely based on the "vhost-user-scsi" implementation by
 * SPDK(https://github.com/spdk/spdk).
 */
#include <assert.h>
#include <ctype.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "vhost_scsi.h"
#include "scsi_spec.h"
/*
 * Byte offset of the first byte AFTER @field in struct scsi_cdb_inquiry_data.
 * Used to compare against the INQUIRY ALLOCATION LENGTH when deciding how
 * much of the standard inquiry data the initiator gave us room for.
 */
#define INQ_OFFSET(field) (offsetof(struct scsi_cdb_inquiry_data, field) + \
			   sizeof(((struct scsi_cdb_inquiry_data *)0x0)->field))
/*
 * Copy @src into the fixed-size field @dst (exactly @size bytes).
 * Shorter strings are right-padded with @pad; longer ones are truncated.
 * NOTE: the result is NOT NUL-terminated — SCSI inquiry fields are
 * fixed-width, space-padded byte arrays, not C strings.
 */
static void
vhost_strcpy_pad(void *dst, const char *src, size_t size, int pad)
{
	size_t len;

	len = strlen(src);
	if (len < size) {
		memcpy(dst, src, len);
		memset((char *)dst + len, pad, size - len);
	} else {
		memcpy(dst, src, size);
	}
}
/*
 * Convert one hexadecimal digit character (case-insensitive) to its
 * 0-15 value. Non-hex input falls through and returns the (lowercased)
 * character code itself; callers are expected to feed hex digits only.
 */
static int
vhost_hex2bin(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	ch = tolower((unsigned char)ch);
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	return (int)ch;
}
/*
 * Build an 8-byte NAA IEEE Extended identifier in @buf from the bdev
 * @name: pack up to 16 hex characters of the name into 8 bytes (two
 * nibbles per byte), then overwrite the fixed NAA/IEEE-company-ID bits.
 * @buf must hold at least 8 bytes and should be zeroed by the caller,
 * since odd nibbles are OR-ed in.
 */
static void
vhost_bdev_scsi_set_naa_ieee_extended(const char *name, uint8_t *buf)
{
	int i, value, count = 0;
	uint64_t *temp64, local_value;

	for (i = 0; (i < 16) && (name[i] != '\0'); i++) {
		value = vhost_hex2bin(name[i]);
		if (i % 2)
			buf[count++] |= value << 4;
		else
			buf[count] = value;
	}

	local_value = *(uint64_t *)buf;
	/*
	 * see spc3r23 7.6.3.6.2,
	 * NAA IEEE Extended identifer format
	 */
	local_value &= 0x0fff000000ffffffull;
	/* NAA 02, and 00 03 47 for IEEE Intel */
	local_value |= 0x2000000347000000ull;

	temp64 = (uint64_t *)buf;
	*temp64 = rte_cpu_to_be_64(local_value);
}
112 scsi_task_build_sense_data(struct vhost_scsi_task *task, int sk,
118 resp_code = 0x70; /* Current + Fixed format */
121 cp = (uint8_t *)task->resp->sense;
123 /* VALID(7) RESPONSE CODE(6-0) */
124 cp[0] = 0x80 | resp_code;
127 /* FILEMARK(7) EOM(6) ILI(5) SENSE KEY(3-0) */
130 memset(&cp[3], 0, 4);
132 /* ADDITIONAL SENSE LENGTH */
135 /* COMMAND-SPECIFIC INFORMATION */
136 memset(&cp[8], 0, 4);
137 /* ADDITIONAL SENSE CODE */
139 /* ADDITIONAL SENSE CODE QUALIFIER */
141 /* FIELD REPLACEABLE UNIT CODE */
144 /* SKSV(7) SENSE KEY SPECIFIC(6-0,7-0,7-0) */
150 task->resp->sense_len = 18;
154 scsi_task_set_status(struct vhost_scsi_task *task, int sc, int sk,
157 if (sc == SCSI_STATUS_CHECK_CONDITION)
158 scsi_task_build_sense_data(task, sk, asc, ascq);
159 task->resp->status = sc;
163 vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
164 struct vhost_scsi_task *task)
167 uint32_t alloc_len = 0;
175 struct scsi_cdb_inquiry *inq;
177 inq = (struct scsi_cdb_inquiry *)task->req->cdb;
179 assert(task->iovs_cnt == 1);
181 /* At least 36Bytes for inquiry command */
182 if (task->data_len < 0x24)
185 pd = SPC_PERIPHERAL_DEVICE_TYPE_DISK;
187 evpd = inq->evpd & 0x1;
193 struct scsi_vpd_page *vpage = (struct scsi_vpd_page *)
194 task->iovs[0].iov_base;
196 /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
197 vpage->peripheral = pd;
199 vpage->page_code = pc;
202 case SPC_VPD_SUPPORTED_VPD_PAGES:
204 vpage->params[0] = SPC_VPD_SUPPORTED_VPD_PAGES;
205 vpage->params[1] = SPC_VPD_UNIT_SERIAL_NUMBER;
206 vpage->params[2] = SPC_VPD_DEVICE_IDENTIFICATION;
209 vpage->alloc_len = rte_cpu_to_be_16(len);
211 case SPC_VPD_UNIT_SERIAL_NUMBER:
213 strncpy((char *)vpage->params, bdev->name, 32);
214 vpage->alloc_len = rte_cpu_to_be_16(32);
216 case SPC_VPD_DEVICE_IDENTIFICATION:
218 struct scsi_desig_desc *desig;
222 desig = (struct scsi_desig_desc *)buf;
223 desig->code_set = SPC_VPD_CODE_SET_BINARY;
224 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
225 desig->type = SPC_VPD_IDENTIFIER_TYPE_NAA;
226 desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
227 desig->reserved0 = 0;
229 desig->reserved1 = 0;
231 vhost_bdev_scsi_set_naa_ieee_extended(bdev->name,
233 len = sizeof(struct scsi_desig_desc) + 8;
235 buf += sizeof(struct scsi_desig_desc) + desig->len;
237 /* T10 Vendor ID designator */
238 desig = (struct scsi_desig_desc *)buf;
239 desig->code_set = SPC_VPD_CODE_SET_ASCII;
240 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
241 desig->type = SPC_VPD_IDENTIFIER_TYPE_T10_VENDOR_ID;
242 desig->association = SPC_VPD_ASSOCIATION_LOGICAL_UNIT;
243 desig->reserved0 = 0;
245 desig->reserved1 = 0;
246 desig->len = 8 + 16 + 32;
247 strncpy((char *)desig->desig, "INTEL", 8);
248 vhost_strcpy_pad((char *)&desig->desig[8],
249 bdev->product_name, 16, ' ');
250 strncpy((char *)&desig->desig[24], bdev->name, 32);
251 len += sizeof(struct scsi_desig_desc) + 8 + 16 + 32;
253 buf += sizeof(struct scsi_desig_desc) + desig->len;
255 /* SCSI Device Name designator */
256 desig = (struct scsi_desig_desc *)buf;
257 desig->code_set = SPC_VPD_CODE_SET_UTF8;
258 desig->protocol_id = SPC_PROTOCOL_IDENTIFIER_ISCSI;
259 desig->type = SPC_VPD_IDENTIFIER_TYPE_SCSI_NAME;
260 desig->association = SPC_VPD_ASSOCIATION_TARGET_DEVICE;
261 desig->reserved0 = 0;
263 desig->reserved1 = 0;
264 desig->len = snprintf((char *)desig->desig,
265 255, "%s", bdev->name);
266 len += sizeof(struct scsi_desig_desc) + desig->len;
268 buf += sizeof(struct scsi_desig_desc) + desig->len;
269 vpage->alloc_len = rte_cpu_to_be_16(len);
276 struct scsi_cdb_inquiry_data *inqdata =
277 (struct scsi_cdb_inquiry_data *)task->iovs[0].iov_base;
278 /* Standard INQUIRY data */
279 /* PERIPHERAL QUALIFIER(7-5) PERIPHERAL DEVICE TYPE(4-0) */
280 inqdata->peripheral = pd;
286 /* See SPC3/SBC2/MMC4/SAM2 for more details */
287 inqdata->version = SPC_VERSION_SPC3;
289 /* NORMACA(5) HISUP(4) RESPONSE DATA FORMAT(3-0) */
290 /* format 2 */ /* hierarchical support */
291 inqdata->response = 2 | 1 << 4;
295 /* SCCS(7) ACC(6) TPGS(5-4) 3PC(3) PROTECT(0) */
296 /* Not support TPGS */
300 inqdata->flags2 = 0x10;
302 /* WBUS16(5) SYNC(4) LINKED(3) CMDQUE(1) VS(0) */
304 inqdata->flags3 = 0x2;
306 /* T10 VENDOR IDENTIFICATION */
307 strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
309 /* PRODUCT IDENTIFICATION */
310 strncpy((char *)inqdata->product_id, bdev->product_name, 16);
312 /* PRODUCT REVISION LEVEL */
313 strncpy((char *)inqdata->product_rev, "0001", 4);
315 /* Standard inquiry data ends here. Only populate
316 * remaining fields if alloc_len indicates enough
319 len = INQ_OFFSET(product_rev) - 5;
321 if (alloc_len >= INQ_OFFSET(vendor)) {
322 /* Vendor specific */
323 memset(inqdata->vendor, 0x20, 20);
324 len += sizeof(inqdata->vendor);
327 if (alloc_len >= INQ_OFFSET(ius)) {
328 /* CLOCKING(3-2) QAS(1) IUS(0) */
330 len += sizeof(inqdata->ius);
333 if (alloc_len >= INQ_OFFSET(reserved)) {
335 inqdata->reserved = 0;
336 len += sizeof(inqdata->reserved);
339 /* VERSION DESCRIPTOR 1-8 */
340 if (alloc_len >= INQ_OFFSET(reserved) + 2) {
341 temp16 = (uint16_t *)&inqdata->desc[0];
342 *temp16 = rte_cpu_to_be_16(0x0960);
346 if (alloc_len >= INQ_OFFSET(reserved) + 4) {
347 /* SPC-3 (no version claimed) */
348 temp16 = (uint16_t *)&inqdata->desc[2];
349 *temp16 = rte_cpu_to_be_16(0x0300);
353 if (alloc_len >= INQ_OFFSET(reserved) + 6) {
354 /* SBC-2 (no version claimed) */
355 temp16 = (uint16_t *)&inqdata->desc[4];
356 *temp16 = rte_cpu_to_be_16(0x0320);
360 if (alloc_len >= INQ_OFFSET(reserved) + 8) {
361 /* SAM-2 (no version claimed) */
362 temp16 = (uint16_t *)&inqdata->desc[6];
363 *temp16 = rte_cpu_to_be_16(0x0040);
367 if (alloc_len > INQ_OFFSET(reserved) + 8) {
368 i = alloc_len - (INQ_OFFSET(reserved) + 8);
371 memset(&inqdata->desc[8], 0, i);
375 /* ADDITIONAL LENGTH */
376 inqdata->add_len = len;
380 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
384 scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
385 SCSI_SENSE_ILLEGAL_REQUEST,
386 SCSI_ASC_INVALID_FIELD_IN_CDB,
387 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
392 vhost_bdev_scsi_readwrite(struct vhost_block_dev *bdev,
393 struct vhost_scsi_task *task,
394 uint64_t lba, __rte_unused uint32_t xfer_len)
400 offset = lba * bdev->blocklen;
402 for (i = 0; i < task->iovs_cnt; i++) {
403 if (task->dxfer_dir == SCSI_DIR_TO_DEV)
404 memcpy(bdev->data + offset, task->iovs[i].iov_base,
405 task->iovs[i].iov_len);
407 memcpy(task->iovs[i].iov_base, bdev->data + offset,
408 task->iovs[i].iov_len);
409 offset += task->iovs[i].iov_len;
410 nbytes += task->iovs[i].iov_len;
417 vhost_bdev_scsi_process_block(struct vhost_block_dev *bdev,
418 struct vhost_scsi_task *task)
420 uint64_t lba, *temp64;
421 uint32_t xfer_len, *temp32;
423 uint8_t *cdb = (uint8_t *)task->req->cdb;
428 lba = (uint64_t)cdb[1] << 16;
429 lba |= (uint64_t)cdb[2] << 8;
430 lba |= (uint64_t)cdb[3];
434 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
438 temp32 = (uint32_t *)&cdb[2];
439 lba = rte_be_to_cpu_32(*temp32);
440 temp16 = (uint16_t *)&cdb[7];
441 xfer_len = rte_be_to_cpu_16(*temp16);
442 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
446 temp32 = (uint32_t *)&cdb[2];
447 lba = rte_be_to_cpu_32(*temp32);
448 temp32 = (uint32_t *)&cdb[6];
449 xfer_len = rte_be_to_cpu_32(*temp32);
450 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
454 temp64 = (uint64_t *)&cdb[2];
455 lba = rte_be_to_cpu_64(*temp64);
456 temp32 = (uint32_t *)&cdb[10];
457 xfer_len = rte_be_to_cpu_32(*temp32);
458 return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);
460 case SBC_READ_CAPACITY_10: {
463 if (bdev->blockcnt - 1 > 0xffffffffULL)
464 memset(buffer, 0xff, 4);
466 temp32 = (uint32_t *)buffer;
467 *temp32 = rte_cpu_to_be_32(bdev->blockcnt - 1);
469 temp32 = (uint32_t *)&buffer[4];
470 *temp32 = rte_cpu_to_be_32(bdev->blocklen);
471 memcpy(task->iovs[0].iov_base, buffer, sizeof(buffer));
472 task->resp->status = SCSI_STATUS_GOOD;
473 return sizeof(buffer);
476 case SBC_SYNCHRONIZE_CACHE_10:
477 case SBC_SYNCHRONIZE_CACHE_16:
478 task->resp->status = SCSI_STATUS_GOOD;
482 scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
483 SCSI_SENSE_ILLEGAL_REQUEST,
484 SCSI_ASC_INVALID_FIELD_IN_CDB,
485 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
490 vhost_bdev_process_scsi_commands(struct vhost_block_dev *bdev,
491 struct vhost_scsi_task *task)
495 uint64_t *temp64, fmt_lun = 0;
498 uint8_t *cdb = (uint8_t *)task->req->cdb;
500 lun = (const uint8_t *)task->req->lun;
501 /* only 1 LUN supported */
502 if (lun[0] != 1 || lun[1] >= 1)
507 len = vhost_bdev_scsi_inquiry_command(bdev, task);
508 task->data_len = len;
510 case SPC_REPORT_LUNS:
511 data = (uint8_t *)task->iovs[0].iov_base;
512 fmt_lun |= (0x0ULL & 0x00ffULL) << 48;
513 temp64 = (uint64_t *)&data[8];
514 *temp64 = rte_cpu_to_be_64(fmt_lun);
515 temp32 = (uint32_t *)data;
516 *temp32 = rte_cpu_to_be_32(8);
518 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
520 case SPC_MODE_SELECT_6:
521 case SPC_MODE_SELECT_10:
522 /* don't support it now */
523 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
525 case SPC_MODE_SENSE_6:
526 case SPC_MODE_SENSE_10:
527 /* don't support it now */
528 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
530 case SPC_TEST_UNIT_READY:
531 scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
534 len = vhost_bdev_scsi_process_block(bdev, task);
535 task->data_len = len;