// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* scsi_init_limits() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif
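/*
 * Note: when the architecture cannot chain scatterlists
 * (CONFIG_ARCH_NO_SG_CHAIN) the inline counts are zero, so
 * sg_alloc_table_chained() always allocates a full table instead of
 * chaining extra entries off the scatterlist embedded in the command.
 */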

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (!scsi_sense_cache) {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (rq->rq_flags & RQF_DONTPREP) {
		rq->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}

	blk_mq_requeue_request(rq, false);
	if (!scsi_host_in_recovery(cmd->device->host))
		blk_mq_delay_kick_requeue_list(rq->q, msecs);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device, cmd);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_mq_destroy_queue() finishes.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
			       !scsi_host_in_recovery(cmd->device->host));
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}
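/*
 * Illustrative sketch (not part of this file): a low-level driver usually
 * reaches scsi_queue_insert() by returning one of the SCSI_MLQUEUE_* codes
 * from its ->queuecommand() implementation, e.g.:
 *
 *	static int example_queuecommand(struct Scsi_Host *shost,
 *					struct scsi_cmnd *cmd)
 *	{
 *		if (example_adapter_saturated(shost))	// hypothetical helper
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *		return 0;
 *	}
 */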

/**
 * scsi_failures_reset_retries - reset all failures to zero
 * @failures: &struct scsi_failures with specific failure modes set
 */
void scsi_failures_reset_retries(struct scsi_failures *failures)
{
	struct scsi_failure *failure;

	failures->total_retries = 0;

	for (failure = failures->failure_definitions; failure->result;
	     failure++)
		failure->retries = 0;
}
EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);

/**
 * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
 * @scmd: scsi_cmnd to check.
 * @failures: scsi_failures struct that lists failures to check for.
 *
 * Returns -EAGAIN if the caller should retry else 0.
 */
static int scsi_check_passthrough(struct scsi_cmnd *scmd,
				  struct scsi_failures *failures)
{
	struct scsi_failure *failure;
	struct scsi_sense_hdr sshdr;
	enum sam_status status;

	if (!scmd->result)
		return 0;

	if (!failures)
		return 0;

	for (failure = failures->failure_definitions; failure->result;
	     failure++) {
		if (failure->result == SCMD_FAILURE_RESULT_ANY)
			goto maybe_retry;

		if (host_byte(scmd->result) &&
		    host_byte(scmd->result) == host_byte(failure->result))
			goto maybe_retry;

		status = status_byte(scmd->result);
		if (!status)
			continue;

		if (failure->result == SCMD_FAILURE_STAT_ANY &&
		    !scsi_status_is_good(scmd->result))
			goto maybe_retry;

		if (status != status_byte(failure->result))
			continue;

		if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
		    failure->sense == SCMD_FAILURE_SENSE_ANY)
			goto maybe_retry;

		if (!scsi_command_normalize_sense(scmd, &sshdr))
			return 0;

		if (failure->sense != sshdr.sense_key)
			continue;

		if (failure->asc == SCMD_FAILURE_ASC_ANY)
			goto maybe_retry;

		if (failure->asc != sshdr.asc)
			continue;

		if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
		    failure->ascq == sshdr.ascq)
			goto maybe_retry;
	}

	return 0;

maybe_retry:
	if (failure->allowed) {
		if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
		    ++failure->retries <= failure->allowed)
			return -EAGAIN;
	} else {
		if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
		    ++failures->total_retries <= failures->total_allowed)
			return -EAGAIN;
	}

	return 0;
}
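/*
 * Illustrative sketch (not part of this file): a caller builds the failure
 * table that scsi_check_passthrough() walks and passes it to
 * scsi_execute_cmd() via struct scsi_exec_args. The table is terminated by
 * a zeroed entry, since the walk above stops at the first !failure->result.
 * For example, retrying UNIT ATTENTION (any ASC/ASCQ) up to three times:
 *
 *	struct scsi_failure failure_defs[] = {
 *		{
 *			.sense = UNIT_ATTENTION,
 *			.asc = SCMD_FAILURE_ASC_ANY,
 *			.ascq = SCMD_FAILURE_ASCQ_ANY,
 *			.allowed = 3,
 *			.result = SAM_STAT_CHECK_CONDITION,
 *		},
 *		{}
 *	};
 *	struct scsi_failures failures = {
 *		.failure_definitions = failure_defs,
 *	};
 *	const struct scsi_exec_args exec_args = { .failures = &failures };
 */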

/**
 * scsi_execute_cmd - insert request and wait for the result
 * @sdev:	scsi_device
 * @cmd:	scsi command
 * @opf:	block layer request cmd_flags
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @timeout:	request timeout in HZ
 * @ml_retries:	number of times SCSI midlayer will retry request
 * @args:	Optional args. See struct definition for field descriptions
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
		     blk_opf_t opf, void *buffer, unsigned int bufflen,
		     int timeout, int ml_retries,
		     const struct scsi_exec_args *args)
{
	static const struct scsi_exec_args default_args;
	struct request *req;
	struct scsi_cmnd *scmd;
	int ret;

	if (!args)
		args = &default_args;
	else if (WARN_ON_ONCE(args->sense &&
			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
		return -EINVAL;

retry:
	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (bufflen) {
		ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO);
		if (ret)
			goto out;
	}
	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
	scmd->allowed = ml_retries;
	scmd->flags |= args->scmd_flags;
	req->timeout = timeout;
	req->rq_flags |= RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req, true);

	if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
		blk_mq_free_request(req);
		goto retry;
	}

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);

	if (args->resid)
		*args->resid = scmd->resid_len;
	if (args->sense)
		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	if (args->sshdr)
		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
				     args->sshdr);

	ret = scmd->result;
 out:
	blk_mq_free_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute_cmd);
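/*
 * Illustrative sketch (not part of this file): a typical caller issues a
 * CDB and decodes the sense data on failure, e.g. a TEST UNIT READY with a
 * 30 second timeout and up to three midlayer retries:
 *
 *	struct scsi_sense_hdr sshdr;
 *	const struct scsi_exec_args exec_args = { .sshdr = &sshdr };
 *	unsigned char cdb[6] = { TEST_UNIT_READY };
 *	int ret;
 *
 *	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, NULL, 0, 30 * HZ,
 *			       3, &exec_args);
 *	if (ret < 0)
 *		return ret;		// no command was executed
 *	if (scsi_status_is_check_condition(ret) && scsi_sense_valid(&sshdr))
 *		;			// inspect sshdr.sense_key/asc/ascq
 */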

/*
 * Wake up the error handler if necessary. To avoid the error handler missing
 * a wakeup when the number of host in-flight requests equals
 * shost->host_failed, scsi_eh_scmd_add() uses call_rcu() in combination with
 * an RCU read lock in this function. This guarantees that this function in
 * its entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	rcu_read_lock();
	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	if (unlikely(scsi_host_in_recovery(shost))) {
		unsigned int busy = scsi_host_busy(shost);

		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost, busy);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost, cmd);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	sbitmap_put(&sdev->budget_map, cmd->budget_token);
	cmd->budget_token = -1;
}

/*
 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 * interrupts disabled.
 */
static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
{
	struct scsi_device *current_sdev = data;

	if (sdev != current_sdev)
		blk_mq_run_hw_queues(sdev->request_queue, true);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_mq_run_hw_queues(current_sdev->request_queue,
			     shost->queuecommand_may_block);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!starget->starget_sdev_user)
		__starget_for_each_device(starget, current_sdev,
					  scsi_kick_sdev_queue);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (scsi_device_busy(sdev) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_mq_destroy_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_mq_run_hw_queues(slq, false);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	/* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */
	blk_mq_kick_requeue_list(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				      SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				      SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
}

static void scsi_run_queue_async(struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(sdev->host))
		return;

	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list)) {
		kblockd_schedule_work(&sdev->requeue_work);
	} else {
		/*
		 * smp_mb() present in sbitmap_queue_clear() or implied in
		 * .end_io is for ordering writing .device_busy in
		 * scsi_device_unbusy() and reading sdev->restarts.
		 */
		int old = atomic_read(&sdev->restarts);

		/*
		 * ->restarts has to be kept as non-zero if new budget
		 *  contention occurs.
		 *
		 *  No need to run queue when either another re-run
		 *  queue wins in updating ->restarts or a new budget
		 *  contention occurs.
		 */
		if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
			blk_mq_run_hw_queues(sdev->request_queue, true);
	}
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
			     unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	if (q->limits.features & BLK_FEAT_ADD_RANDOM)
		add_disk_randomness(req->q->disk);

	WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
		     !(cmd->flags & SCMD_INITIALIZED));
	cmd->flags = 0;

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it earlier.
	 *
	 * We also can't kick the queues from irq context, so we
	 * will have to defer it to a workqueue.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	scsi_run_queue_async(sdev);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.
 */
static blk_status_t scsi_result_to_blk_status(int result)
{
	/*
	 * Check the scsi-ml byte first in case we converted a host or status
	 * byte.
	 */
	switch (scsi_ml_byte(result)) {
	case SCSIML_STAT_OK:
		break;
	case SCSIML_STAT_RESV_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case SCSIML_STAT_NOSPC:
		return BLK_STS_NOSPC;
	case SCSIML_STAT_MED_ERROR:
		return BLK_STS_MEDIUM;
	case SCSIML_STAT_TGT_FAILURE:
		return BLK_STS_TARGET;
	case SCSIML_STAT_DL_TIMEOUT:
		return BLK_STS_DURATION_LIMIT;
	}

	switch (host_byte(result)) {
	case DID_OK:
		if (scsi_status_is_good(result))
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
	case DID_TRANSPORT_MARGINAL:
		return BLK_STS_TRANSPORT;
	default:
		return BLK_STS_IOERR;
	}
}

/**
 * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different failfast types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	unsigned long wait_for;

	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return false;

	wait_for = (cmd->allowed + 1) * req->timeout;
	if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
			    wait_for/HZ);
		return true;
	}
	return false;
}

/*
 * When ALUA transition state is returned, reprep the cmd to
 * use the ALUA handler's transition timeout. Delay the reprep
 * 1 sec to avoid aggressive retries of the target in that
 * state.
 */
#define ALUA_TRANSITION_REPREP_DELAY	1000

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request *req = scsi_cmd_to_rq(cmd);
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
	      ACTION_RETRY, ACTION_DELAYED_RETRY} action;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;      /* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			     sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x11: /* notify (enable spinup) required */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				case 0x0a: /* ALUA state transition */
					action = ACTION_DELAYED_REPREP;
					break;
				/*
				 * Depopulation might take many hours,
				 * thus it is not worthwhile to retry.
				 */
				case 0x24: /* depopulation in progress */
				case 0x25: /* depopulation restore in progress */
					fallthrough;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		case DATA_PROTECT:
			action = ACTION_FAIL;
			if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
			    (sshdr.asc == 0x55 &&
			     (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
				/* Insufficient zone resources */
				blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
			}
			break;
		case COMPLETED:
			fallthrough;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (sense_valid)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
			return;
		fallthrough;
	case ACTION_REPREP:
		scsi_mq_requeue_cmd(cmd, 0);
		break;
	case ACTION_DELAYED_REPREP:
		scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = scsi_cmd_to_rq(cmd);
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			cmd->sense_len = min(8 + cmd->sense_buffer[7],
					     SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets *blk_statp explicitly for the problem case.
		 */
		*blk_statp = scsi_result_to_blk_status(result);
	}
	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * the original value is still in cmd->result, which is what gets
	 * returned to the user.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;
		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers. Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if ((result & 0xff) && scsi_status_is_good(result)) {
		result = 0;
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * We will finish off the specified number of sectors. If we are done, the
 * command block will be released and the queue function will be goosed. If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_mq_requeue_cmd().  The request will be
 *	unprepared and put back on the queue.  Then a new command will
 *	be created for it.  This should be used if we made forward
 *	progress, or if we want to switch from READ(10) to READ(6) for
 *	example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *	put back on the queue and retried using the same command as
 *	before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *	BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request *req = scsi_cmd_to_rq(cmd);
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Failed, zero length commands always need to drop down
	 * to retry code. Fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
				  "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_mq_requeue_cmd(cmd, 0);
	else
		scsi_io_completion_action(cmd, result);
}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
					    struct request *rq)
{
	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
	       !op_is_write(req_op(rq)) &&
	       sdev->host->hostt->dma_need_drain(rq);
}
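/*
 * Illustrative note (not part of this file): the ->dma_need_drain() hook and
 * sdev->dma_drain_buf/dma_drain_len are set up by the low-level driver;
 * libata, for example, points .dma_need_drain at ata_scsi_dma_need_drain in
 * its host template so that certain ATAPI commands get a drain buffer
 * appended to the scatterlist below.
 */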

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
	struct scatterlist *last_sg = NULL;
	blk_status_t ret;
	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
	int count;

	if (WARN_ON_ONCE(!nr_segs))
		return BLK_STS_IOERR;

	/*
	 * Make sure there is space for the drain.  The driver must adjust
	 * max_hw_segments to be prepared for this.
	 */
	if (need_drain)
		nr_segs++;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
					    cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);

	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
		unsigned int pad_len =
			(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		last_sg->length += pad_len;
		cmd->extra_len += pad_len;
	}

	if (need_drain) {
		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
		sg_mark_end(last_sg);

		cmd->extra_len += sdev->dma_drain_len;
		count++;
	}

	BUG_ON(count > cmd->sdb.table.nents);
	cmd->sdb.table.nents = count;
	cmd->sdb.length = blk_rq_payload_bytes(rq);

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		if (sg_alloc_table_chained(&prot_sdb->table,
					   rq->nr_integrity_segments,
					   prot_sdb->table.sgl,
					   SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_alloc_sgtables);

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
	cmd->cmd_len = MAX_COMMAND_SIZE;
	cmd->sense_len = 0;
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

/**
 * scsi_alloc_request - allocate a block request and partially
 *                      initialize its &scsi_cmnd
 * @q: the device's request queue
 * @opf: the request operation code
 * @flags: block layer allocation flags
 *
 * Return: &struct request pointer on success or an ERR_PTR() on failure
 */
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
				   blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, opf, flags);
	if (!IS_ERR(rq))
		scsi_initialize_rq(rq);
	return rq;
}
EXPORT_SYMBOL_GPL(scsi_alloc_request);

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI.
 */
static void scsi_cleanup_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->flags = 0;

	if (rq->rq_flags & RQF_DONTPREP) {
		scsi_mq_uninit_cmd(cmd);
		rq->rq_flags &= ~RQF_DONTPREP;
	}
}

/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
		cmd->flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	cmd->device = dev;
	INIT_LIST_HEAD(&cmd->eh_entry);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_alloc_sgtables(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->transfersize = blk_rq_bytes(req);
	return BLK_STS_OK;
}

static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_CREATED:
		return BLK_STS_OK;
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		if (!sdev->offline_already) {
			sdev->offline_already = true;
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
		}
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * If the device is quiesced we only accept power management
		 * commands.
		 */
		if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * power management commands.
		 */
		if (req && !(req->rq_flags & RQF_PM))
			return BLK_STS_OFFLINE;
		return BLK_STS_OK;
	}
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, assign one budget
 * token and return it; otherwise return -1.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	int token;

	token = sbitmap_get(&sdev->budget_map);
	if (token < 0)
		return -1;

	if (!atomic_read(&sdev->device_blocked))
		return token;

	/*
	 * Only unblock if no other commands are pending and
	 * if device_blocked has decreased to zero
	 */
	if (scsi_device_busy(sdev) > 1 ||
	    atomic_dec_return(&sdev->device_blocked) > 0) {
		sbitmap_put(&sdev->budget_map, token);
		return -1;
	}

	SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
			 "unblocking device at zero depth\n"));

	return token;
}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev,
					struct scsi_cmnd *cmd)
{
	if (atomic_read(&shost->host_blocked) > 0) {
		if (scsi_host_busy(shost) > 0)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost, cmd);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of whether a lock is taken or not.
 *
 * When SCSI can't dispatch I/Os any more and needs to kill them, it
 * must return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return false;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since the block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in the SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return true;

	return false;
}

/*
 * Block layer request completion callback. May be called from interrupt
 * context.
 */
static void scsi_complete(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	enum scsi_disposition disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
		disposition = SUCCESS;

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
				 "queuecommand : device blocked\n"));
		atomic_dec(&cmd->device->iorequest_cnt);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
				 "queuecommand : command too long. "
				 "cdb_size=%d host->max_cmd_len=%d\n",
				 cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;
	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		atomic_dec(&cmd->device->iorequest_cnt);
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
				 "queuecommand : request rejected\n"));
	}

	return rtn;
done:
	scsi_done(cmd);
	return 0;
}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
		sizeof(struct scatterlist);
}

static blk_status_t scsi_prepare_cmd(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	cmd->eh_eflags = 0;
	cmd->prot_type = 0;
	cmd->prot_flags = 0;
	cmd->submitter = 0;
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	cmd->underflow = 0;
	cmd->transfersize = 0;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	cmd->extra_len = 0;
	cmd->state = 0;
	if (in_flight)
		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	cmd->prot_op = SCSI_PROT_NORMAL;
	if (blk_rq_bytes(req))
		cmd->sc_data_direction = rq_dma_dir(req);
	else
		cmd->sc_data_direction = DMA_NONE;

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	/*
	 * Special handling for passthrough commands, which don't go to the ULP
	 * at all:
	 */
	if (blk_rq_is_passthrough(req))
		return scsi_setup_scsi_cmnd(sdev, req);

	if (sdev->handler && sdev->handler->prep_fn) {
		blk_status_t ret = sdev->handler->prep_fn(sdev, req);

		if (ret != BLK_STS_OK)
			return ret;
	}

	/* Usually overridden by the ULP */
	cmd->allowed = 0;
	memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{
	struct request *req = scsi_cmd_to_rq(cmd);

	switch (cmd->submitter) {
	case SUBMITTED_BY_BLOCK_LAYER:
		break;
	case SUBMITTED_BY_SCSI_ERROR_HANDLER:
		return scsi_eh_done(cmd);
	case SUBMITTED_BY_SCSI_RESET_IOCTL:
		return;
	}

	if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
		return;
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);

	if (complete_directly)
		blk_mq_complete_request_direct(req, scsi_complete);
	else
		blk_mq_complete_request(req);
}

void scsi_done(struct scsi_cmnd *cmd)
{
	scsi_done_internal(cmd, false);
}
EXPORT_SYMBOL(scsi_done);

void scsi_done_direct(struct scsi_cmnd *cmd)
{
	scsi_done_internal(cmd, true);
}
EXPORT_SYMBOL(scsi_done_direct);
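/*
 * Illustrative sketch (assumption: not part of this file): the usual way a
 * low-level driver completes a command from its interrupt handler. The
 * example_hba structure and example_pop_completed() helper are hypothetical.
 */
#if 0
static irqreturn_t example_lld_isr(int irq, void *data)
{
	struct example_hba *hba = data;
	struct scsi_cmnd *scmd = example_pop_completed(hba);

	if (!scmd)
		return IRQ_NONE;

	scmd->result = DID_OK << 16;	/* host byte; SAM status stays 0 */
	scsi_done(scmd);		/* safe from hard-irq context */
	return IRQ_HANDLED;
}
#endif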

static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
	struct scsi_device *sdev = q->queuedata;

	sbitmap_put(&sdev->budget_map, budget_token);
}

/*
 * When to reinvoke queueing after a resource shortage. The delay is 3 msecs
 * so as not to change behaviour from the previous unplug mechanism;
 * experimentation may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY 3

static int scsi_mq_get_budget(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	int token = scsi_dev_queue_ready(q, sdev);

	if (token >= 0)
		return token;

	atomic_inc(&sdev->restarts);

	/*
	 * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
	 * .restarts must be incremented before .device_busy is read because the
	 * code in scsi_run_queue_async() depends on the order of these operations.
	 */
	smp_mb__after_atomic();

	/*
	 * If all in-flight requests originated from this LUN are completed
	 * before reading .device_busy, sdev->device_busy will be observed as
	 * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
	 * soon. Otherwise, completion of one of these requests will observe
	 * the .restarts flag, and the request queue will be run for handling
	 * this request, see scsi_end_request().
	 */
	if (unlikely(scsi_device_busy(sdev) == 0 &&
		     !scsi_device_blocked(sdev)))
		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
	return -1;
}

static void scsi_mq_set_rq_budget_token(struct request *req, int token)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	cmd->budget_token = token;
}

static int scsi_mq_get_rq_budget_token(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	return cmd->budget_token;
}

static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	WARN_ON_ONCE(cmd->budget_token < 0);

	/*
	 * If the device is not in running state we will reject some or all
	 * commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		ret = scsi_device_state_check(sdev, req);
		if (ret != BLK_STS_OK)
			goto out_put_budget;
	}

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (unlikely(scsi_host_in_recovery(shost))) {
		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
			ret = BLK_STS_OFFLINE;
		goto out_dec_target_busy;
	}
	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
		goto out_dec_target_busy;

	/*
	 * Only clear the driver-private command data if the LLD does not supply
	 * a function to initialize that data.
	 */
	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
		memset(scsi_cmd_priv(cmd), 0, shost->hostt->cmd_size);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = scsi_prepare_cmd(req);
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
	}

	cmd->flags &= SCMD_PRESERVED_FLAGS;
	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	if (bd->last)
		cmd->flags |= SCMD_LAST;

	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;

	blk_mq_start_request(req);
	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(q, cmd->budget_token);
	cmd->budget_token = -1;
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		if (scsi_device_blocked(sdev))
			ret = BLK_STS_DEV_RESOURCE;
		break;
	case BLK_STS_AGAIN:
		cmd->result = DID_BUS_BUSY << 16;
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		if (unlikely(!scsi_device_online(sdev)))
			cmd->result = DID_NO_CONNECT << 16;
		else
			cmd->result = DID_ERROR << 16;
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		scsi_run_queue_async(sdev);
		break;
	}
	return ret;
}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;
	int ret = 0;

	cmd->sense_buffer =
		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;

	if (scsi_host_get_prot(shost)) {
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
	}

	if (shost->hostt->init_cmd_priv) {
		ret = shost->hostt->init_cmd_priv(shost, cmd);
		if (ret < 0)
			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
	}

	return ret;
}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	struct Scsi_Host *shost = set->driver_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (shost->hostt->exit_cmd_priv)
		shost->hostt->exit_cmd_priv(shost, cmd);
	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
}

static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct Scsi_Host *shost = hctx->driver_data;

	if (shost->hostt->mq_poll)
		return shost->hostt->mq_poll(shost, hctx->queue_num);

	return 0;
}

static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct Scsi_Host *shost = data;

	hctx->driver_data = shost;
	return 0;
}

static void scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (shost->hostt->map_queues)
		return shost->hostt->map_queues(shost);
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
{
	struct device *dev = shost->dma_dev;

	memset(lim, 0, sizeof(*lim));
	lim->max_segments =
		min_t(unsigned short, shost->sg_tablesize, SG_MAX_SEGMENTS);

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		lim->max_integrity_segments = shost->sg_prot_tablesize;
	}

	lim->max_hw_sectors = shost->max_sectors;
	lim->seg_boundary_mask = shost->dma_boundary;
	lim->max_segment_size = shost->max_segment_size;
	lim->virt_boundary_mask = shost->virt_boundary_mask;
	lim->dma_alignment = max_t(unsigned int,
		shost->dma_alignment, dma_get_cache_alignment() - 1);

	/*
	 * Propagate the DMA-related properties to the dma-mapping layer as
	 * a courtesy service to the LLDDs.  This needs to check that the buses
	 * actually support the DMA API first, though.
	 */
	if (dev->dma_parms) {
		dma_set_seg_boundary(dev, shost->dma_boundary);
		dma_set_max_seg_size(dev, shost->max_segment_size);
	}
}
EXPORT_SYMBOL_GPL(scsi_init_limits);
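/*
 * Illustrative sketch (assumption: not part of this file): how a transport
 * class might seed queue limits from the host defaults and then tighten one
 * of them before allocating a queue. example_alloc_queue() and the
 * 2048-sector cap are hypothetical.
 */
#if 0
static struct request_queue *example_alloc_queue(struct Scsi_Host *shost,
						 void *queuedata)
{
	struct queue_limits lim;

	scsi_init_limits(shost, &lim);
	lim.max_hw_sectors = min_t(unsigned int, lim.max_hw_sectors, 2048);

	return blk_mq_alloc_queue(&shost->tag_set, &lim, queuedata);
}
#endif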

static const struct blk_mq_ops scsi_mq_ops_no_commit = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_complete,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
	.init_hctx	= scsi_init_hctx,
	.poll		= scsi_mq_poll,
	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
};

static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct Scsi_Host *shost = hctx->driver_data;

	shost->hostt->commit_rqs(shost, hctx->queue_num);
}

static const struct blk_mq_ops scsi_mq_ops = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.commit_rqs	= scsi_commit_rqs,
	.complete	= scsi_complete,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
	.init_hctx	= scsi_init_hctx,
	.poll		= scsi_mq_poll,
	.set_rq_budget_token = scsi_mq_set_rq_budget_token,
	.get_rq_budget_token = scsi_mq_get_rq_budget_token,
};

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size;
	struct blk_mq_tag_set *tag_set = &shost->tag_set;

	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
			 scsi_mq_inline_sgl_size(shost));
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) +
			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

	memset(tag_set, 0, sizeof(*tag_set));
	if (shost->hostt->commit_rqs)
		tag_set->ops = &scsi_mq_ops;
	else
		tag_set->ops = &scsi_mq_ops_no_commit;
	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
	tag_set->nr_maps = shost->nr_maps ? : 1;
	tag_set->queue_depth = shost->can_queue;
	tag_set->cmd_size = cmd_size;
	tag_set->numa_node = dev_to_node(shost->dma_dev);
	if (shost->hostt->tag_alloc_policy_rr)
		tag_set->flags |= BLK_MQ_F_TAG_RR;
	if (shost->queuecommand_may_block)
		tag_set->flags |= BLK_MQ_F_BLOCKING;
	tag_set->driver_data = shost;
	if (shost->host_tagset)
		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;

	return blk_mq_alloc_tag_set(tag_set);
}

void scsi_mq_free_tags(struct kref *kref)
{
	struct Scsi_Host *shost = container_of(kref, typeof(*shost),
					       tagset_refcnt);

	blk_mq_free_tag_set(&shost->tag_set);
	complete(&shost->tagset_freed);
}

/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	if (q->mq_ops == &scsi_mq_ops_no_commit ||
	    q->mq_ops == &scsi_mq_ops)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
/*
 * pktcdvd should have been integrated into the SCSI layers, but for historical
 * reasons like the old IDE driver it isn't.  This export allows it to safely
 * probe if a given device is a SCSI one and only attach to that.
 */
#ifdef CONFIG_CDROM_PKTCDVD_MODULE
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
#endif
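/*
 * Illustrative sketch (assumption: not part of this file): probing whether a
 * queue belongs to a SCSI device, pktcdvd-style. Note that a non-NULL return
 * carries a reference on the device that the caller must drop.
 */
#if 0
static int example_probe_queue(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return -ENODEV;		/* not a SCSI device, don't attach */

	/* ... inspect sdev->type etc. ... */

	put_device(&sdev->sdev_gendev);	/* drop the reference taken above */
	return 0;
}
#endif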

/**
 * scsi_block_requests - Utility function used by low-level drivers to prevent
 * further commands from being queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/**
 * scsi_unblock_requests - Utility function used by low-level drivers to allow
 * further commands to be queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests(). This is done
 * as an API function so that changes to the internals of the scsi mid-layer
 * won't require wholesale changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
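/*
 * Illustrative sketch (assumption: not part of this file): a low-level driver
 * pausing command submission around a disruptive operation such as a firmware
 * reset. example_reset_firmware() and hba are hypothetical.
 */
#if 0
	scsi_block_requests(shost);
	example_reset_firmware(hba);
	scsi_unblock_requests(shost);	/* also reruns the host queues */
#endif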

void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or SCSI
 *	status on error
 */
int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
		     unsigned char *buffer, int len, int timeout, int retries,
		     struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	/*
	 * Use MODE SELECT(10) if the device asked for it or if the mode page
	 * and the mode select header cannot fit within the maximum 255 bytes
	 * of the MODE SELECT(6) command.
	 */
	if (sdev->use_10_for_ms ||
	    len + 4 > 255 ||
	    data->block_descriptor_length > 255) {
		if (len > 65535 - 8)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		put_unaligned_be16(data->block_descriptor_length,
				   &real_buffer[6]);

		cmd[0] = MODE_SELECT_10;
		put_unaligned_be16(len, &cmd[7]);
	} else {
		if (data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
			       timeout, retries, &exec_args);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
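/*
 * Illustrative sketch (assumption: not part of this file), modelled on
 * sd.c's cache_type_store(): read the Caching mode page (0x08), flip the
 * WCE bit and write the page back. Timeout and retry counts are arbitrary.
 */
#if 0
static int example_set_wce(struct scsi_device *sdev, bool enable)
{
	unsigned char buf[192], *page;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int len, ret;

	ret = scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08 /* caching page */,
			      0, buf, sizeof(buf), 10 * HZ, 3, &data, &sshdr);
	if (ret)
		return ret;

	/* the mode page follows the header and any block descriptors */
	page = buf + data.header_length + data.block_descriptor_length;
	len = data.length - data.header_length - data.block_descriptor_length;
	page[2] = (page[2] & ~0x04) | (enable ? 0x04 : 0);	/* WCE bit */
	data.device_specific = 0;	/* WP/DPOFUA must not be sent back */

	return scsi_mode_select(sdev, 1 /* PF */, 0 /* SP */, page, len,
				10 * HZ, 3, &data, &sshdr);
}
#endif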

/**
 *	scsi_mode_sense - issue a mode sense, falling back from the ten-byte to the six-byte form if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set to prevent mode sense from returning block descriptors
 *	@modepage: mode page being requested
 *	@subpage: sub-page of the mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful, or a negative error number on failure
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;
	struct scsi_failure failure_defs[] = {
		{
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.allowed = retries,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		/* caller might not be interested in sense, but we need it */
		.sshdr = sshdr ? : &my_sshdr,
		.failures = &failures,
	};

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);

	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;
	cmd[3] = subpage;

	sshdr = exec_args.sshdr;

retry:
	use_10_for_ms = sdev->use_10_for_ms || len > 255;

	if (use_10_for_ms) {
		if (len < 8 || len > 65535)
			return -EINVAL;

		cmd[0] = MODE_SENSE_10;
		put_unaligned_be16(len, &cmd[7]);
		header_length = 8;
	} else {
		if (len < 4)
			return -EINVAL;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
				  timeout, retries, &exec_args);
	if (result < 0)
		return result;

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (!scsi_status_is_good(result)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code: retry using
				 * MODE SENSE(6) if this was a MODE SENSE(10)
				 * request, except if the requested mode page
				 * is too large for the MODE SENSE(6) single
				 * byte allocation length field.
				 */
				if (use_10_for_ms) {
					if (len > 255)
						return -EIO;
					sdev->use_10_for_ms = 0;
					goto retry;
				}
			}
		}
		return -EIO;
	}
	if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
		     (modepage == 6 || modepage == 8))) {
		/* Initio breakage? */
		header_length = 0;
		data->length = 13;
		data->medium_type = 0;
		data->device_specific = 0;
		data->longlba = 0;
		data->block_descriptor_length = 0;
	} else if (use_10_for_ms) {
		data->length = get_unaligned_be16(&buffer[0]) + 2;
		data->medium_type = buffer[2];
		data->device_specific = buffer[3];
		data->longlba = buffer[4] & 0x01;
		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
	} else {
		data->length = buffer[0] + 1;
		data->medium_type = buffer[1];
		data->device_specific = buffer[2];
		data->block_descriptor_length = buffer[3];
	}
	data->header_length = header_length;

	return 0;
}
EXPORT_SYMBOL(scsi_mode_sense);
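/*
 * Illustrative sketch (assumption: not part of this file): querying whether
 * the write cache is enabled by reading the Caching mode page. Offsets follow
 * the scsi_mode_data layout filled in above; timeouts are arbitrary.
 */
#if 0
static bool example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buf[192];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	if (scsi_mode_sense(sdev, 0x08 /* DBD */, 0x08, 0, buf, sizeof(buf),
			    10 * HZ, 3, &data, &sshdr))
		return false;

	/* byte 2, bit 2 of the caching page is WCE */
	return buf[data.header_length + data.block_descriptor_length + 2] & 0x04;
}
#endif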

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to test.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: output pointer for decoded sense information.
 *
 *	Returns zero if the unit is ready, or an error result if the TUR
 *	failed.  For removable media, UNIT_ATTENTION sets the ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
					  timeout, 1, &exec_args);
		if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (result > 0 && scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
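/*
 * Illustrative sketch (assumption: not part of this file): polling a
 * removable-media device for readiness, in the style of the sr/sd media
 * checks. Timeout and retry counts are arbitrary.
 */
#if 0
static bool example_media_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	return scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr) == 0;
}
#endif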

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->offline_already = false;
	sdev->sdev_state = state;
	return 0;

illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
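/*
 * Illustrative sketch (assumption: not part of this file): forcing a device
 * offline, as transport classes do when a remote port is lost; the caller is
 * responsible for whatever locking the surrounding context requires.
 */
#if 0
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "offline transition rejected\n");
#endif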

/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(sdev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 *	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 *	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 *	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
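
/*
 * Illustrative sketch (not part of the original file): a driver that
 * detects a media change could assert the corresponding event from
 * non-sleeping context with:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * which allocates a scsi_event and queues it for scsi_evt_thread() to
 * emit as a "SDEV_MEDIA_CHANGE=1" uevent.
 */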

/**
 *	scsi_device_quiesce - Block all commands except power management.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only power management requests will be accepted, all others will
 *	be deferred.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned int memflags;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls are
	 * not allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	if (sdev->quiesced_by == current)
		return 0;

	blk_set_pm_only(q);

	memflags = blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_pm_only() will be visible
	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this function
	 * was called. See also https://lwn.net/Articles/573497/.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q, memflags);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		blk_clear_pm_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	mutex_lock(&sdev->state_mutex);
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	if (sdev->quiesced_by) {
		sdev->quiesced_by = NULL;
		blk_clear_pm_only(sdev->request_queue);
	}
	mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
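
/*
 * Illustrative sketch (not part of the original file): power management
 * code typically brackets a suspend/resume cycle with these two helpers:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		// issue power management requests
 *		scsi_device_resume(sdev);
 *	}
 *
 * While quiesced, only power-management requests make it past
 * blk_set_pm_only(); everything else is deferred until
 * scsi_device_resume() is called.
 */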

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_BLOCK))
		return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

	return 0;
}

void scsi_start_queue(struct scsi_device *sdev)
{
	if (cmpxchg(&sdev->queue_stopped, 1, 0))
		blk_mq_unquiesce_queue(sdev->request_queue);
}

static void scsi_stop_queue(struct scsi_device *sdev)
{
	/*
	 * The atomic ->queue_stopped flag ensures that calls to
	 * blk_mq_quiesce_queue_nowait() are balanced with calls to
	 * blk_mq_unquiesce_queue().
	 *
	 * The caller needs to wait until quiesce is done.
	 */
	if (!cmpxchg(&sdev->queue_stopped, 0, 1))
		blk_mq_quiesce_queue_nowait(sdev->request_queue);
}

/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device. Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock_nowait().
 */
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	int ret = __scsi_internal_device_block_nowait(sdev);

	/*
	 * If the device has transitioned to SDEV_BLOCK, stop the block
	 * layer from calling the midlayer with this device's request
	 * queue.
	 */
	if (!ret)
		scsi_stop_queue(sdev);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);

/**
 * scsi_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 * @data: dummy argument, ignored
 *
 * Pause SCSI command processing on the specified device. Callers must wait
 * until all ongoing scsi_queue_rq() calls have finished after this function
 * returns.
 *
 * Note:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock().
 */
static void scsi_device_block(struct scsi_device *sdev, void *data)
{
	int err;
	enum scsi_device_state state;

	mutex_lock(&sdev->state_mutex);
	err = __scsi_internal_device_block_nowait(sdev);
	state = sdev->sdev_state;
	if (err == 0)
		/*
		 * scsi_stop_queue() must be called with the state_mutex
		 * held. Otherwise a simultaneous scsi_start_queue() call
		 * might unquiesce the queue before we quiesce it.
		 */
		scsi_stop_queue(sdev);

	mutex_unlock(&sdev->state_mutex);

	WARN_ONCE(err, "%s: failed to block %s in state %d\n",
		  __func__, dev_name(&sdev->sdev_gendev), state);
}

/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	switch (new_state) {
	case SDEV_RUNNING:
	case SDEV_TRANSPORT_OFFLINE:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
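
/*
 * Illustrative summary (not part of the original file) of how the current
 * state and @new_state combine in scsi_internal_device_unblock_nowait():
 *
 *	SDEV_BLOCK              -> @new_state
 *	SDEV_TRANSPORT_OFFLINE  -> @new_state
 *	SDEV_CREATED_BLOCK      -> @new_state if offline, else SDEV_CREATED
 *	SDEV_CANCEL/SDEV_OFFLINE-> state unchanged, queue restarted anyway
 *	anything else           -> -EINVAL, queue left alone
 */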

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					scsi_device_block);
	return 0;
}

/**
 * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state
 * @dev: a parent device of one or more scsi_target devices
 * @shost: the Scsi_Host to which this device belongs
 *
 * Iterate over all children of @dev, which should be scsi_target devices,
 * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for
 * ongoing scsi_queue_rq() calls to finish. May sleep.
 *
 * Note:
 * @dev must not itself be a scsi_target device.
 */
void
scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
{
	WARN_ON_ONCE(scsi_is_target_device(dev));
	device_for_each_child(dev, NULL, target_block);
	blk_mq_wait_quiesce_done(&shost->tag_set);
}
EXPORT_SYMBOL_GPL(scsi_block_targets);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
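
/*
 * Illustrative sketch (not part of the original file): a transport class
 * that must fence I/O during recovery could pair the two helpers, e.g.:
 *
 *	scsi_block_targets(shost, &rport->dev);
 *	// perform transport-level recovery
 *	scsi_target_unblock(&rport->dev, SDEV_RUNNING);
 *
 * where rport is a hypothetical remote-port device whose children are
 * scsi_target devices.
 */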

/**
 * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state
 * @shost: device to block
 *
 * Pause SCSI command processing for all logical units associated with the SCSI
 * host and wait until pending scsi_queue_rq() calls have finished.
 *
 * Returns zero if successful or a negative error code upon failure.
 */
int
scsi_host_block(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	int ret;

	/*
	 * Call scsi_internal_device_block_nowait so we can avoid
	 * calling synchronize_rcu() for each LUN.
	 */
	shost_for_each_device(sdev, shost) {
		mutex_lock(&sdev->state_mutex);
		ret = scsi_internal_device_block_nowait(sdev);
		mutex_unlock(&sdev->state_mutex);
		if (ret) {
			scsi_device_put(sdev);
			return ret;
		}
	}

	/* Wait for ongoing scsi_queue_rq() calls to finish. */
	blk_mq_wait_quiesce_done(&shost->tag_set);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_host_block);

int
scsi_host_unblock(struct Scsi_Host *shost, int new_state)
{
	struct scsi_device *sdev;
	int ret = 0;

	shost_for_each_device(sdev, shost) {
		ret = scsi_internal_device_unblock(sdev, new_state);
		if (ret) {
			scsi_device_put(sdev);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_unblock);
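
/*
 * Illustrative sketch (not part of the original file): host-wide blocking
 * is typically wrapped around operations such as a host reset, e.g.:
 *
 *	if (scsi_host_block(shost) == 0) {
 *		// reset or reconfigure the host
 *		scsi_host_unblock(shost, SDEV_RUNNING);
 *	}
 */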

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	page = sg_page(sg) + (*offset >> PAGE_SHIFT);
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
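
/*
 * Illustrative sketch (not part of the original file): copying a few bytes
 * out of a scatter-gather list at a given byte offset. @offset and @len are
 * updated in place, and at most one page is mapped per call:
 *
 *	size_t off = byte_offset, len = bytes_wanted;
 *	void *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &off, &len);
 *
 *	if (vaddr) {
 *		memcpy(buf, vaddr + off, len);	// len may be < bytes_wanted
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */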

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);

static unsigned char designator_prio(const unsigned char *d)
{
	if (d[1] & 0x30)
		/* not associated with LUN */
		return 0;

	if (d[3] == 0)
		/* invalid length */
		return 0;

	/*
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - EUI-64 based 8-byte
	 * - SCSI name string (truncated)
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	switch (d[1] & 0xf) {
	case 8:
		/* SCSI name string, variable-length UTF-8 */
		return 9;
	case 3:
		switch (d[4] >> 4) {
		case 6:
			/* NAA registered extended */
			return 8;
		case 5:
			/* NAA registered */
			return 5;
		case 4:
			/* NAA extended */
			return 4;
		case 3:
			/* NAA locally assigned */
			return 1;
		default:
			break;
		}
		break;
	case 2:
		switch (d[3]) {
		case 16:
			/* EUI64-based, 16 byte */
			return 7;
		case 12:
			/* EUI64-based, 12 byte */
			return 6;
		case 8:
			/* EUI64-based, 8 byte */
			return 3;
		default:
			break;
		}
		break;
	case 1:
		/* T10 vendor ID */
		return 1;
	default:
		break;
	}

	return 0;
}
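
/*
 * Illustrative example (not part of the original file): for a designator
 * whose header has d[1] = 0x03 (LUN-associated, NAA type) and
 * d[4] >> 4 == 6 (NAA IEEE Registered Extended), designator_prio()
 * returns 8, so it is preferred over everything except a full SCSI name
 * string (priority 9).
 */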

/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len:  length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_prio = 0;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	for (d = vpd_pg83->data + 4;
	     d < vpd_pg83->data + vpd_pg83->len;
	     d += d[3] + 4) {
		u8 prio = designator_prio(d);

		if (prio == 0 || cur_id_prio > prio)
			continue;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			cur_id_prio = prio;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			cur_id_prio = prio;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				break;
			}
			break;
		case 0x3:
			/* NAA */
			cur_id_prio = prio;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (d[3] > id_len) {
				prio = 2;
				if (cur_id_prio > prio)
					break;
			}
			cur_id_prio = prio;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			break;
		default:
			break;
		}
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
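
/*
 * Illustrative examples (not from the original file) of identifiers this
 * returns, depending on the best designator found in VPD page 0x83:
 *
 *	NAA:              "naa.5000c50015ea71ac"
 *	EUI-64:           "eui.0123456789abcdef"
 *	T10 vendor ID:    "t10.VENDOR  MODEL   SERIAL"
 *	SCSI name string: copied verbatim (already formatted per SPC)
 *
 * The example values are made up; real identifiers come from the device.
 */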

/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return relative target port in if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 * Optionally sets @rel_id to the relative target port on success.
 *
 * Return: the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	const unsigned char *d;
	const struct scsi_vpd *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);

/**
 * scsi_build_sense - build sense data for a command
 * @scmd:	scsi command for which the sense should be formatted
 * @desc:	Sense format (non-zero == descriptor format,
 *              0 == fixed format)
 * @key:	Sense key
 * @asc:	Additional sense code
 * @ascq:	Additional sense code qualifier
 *
 **/
void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
{
	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
	scmd->result = SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(scsi_build_sense);
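
/*
 * Illustrative sketch (not part of the original file): failing a command
 * with ILLEGAL REQUEST / "invalid field in CDB" (asc 0x24, ascq 0x00) in
 * fixed sense format:
 *
 *	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0x00);
 *
 * This fills scmd->sense_buffer and sets SAM_STAT_CHECK_CONDITION so the
 * completion path reports a check condition to the issuer.
 */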

#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
#include "scsi_lib_test.c"
#endif
|---|