/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

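/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * converting a byte count into whole 512-byte sectors using the constants
 * above.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	/* round up to the next sector boundary, then shift down */
	return (sector_t)((bytes + SECTOR_SIZE - 1) >> SECTOR_SHIFT);
}
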
struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8) // read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

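/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * dev_to_bdev() recovers the containing block_device from its embedded
 * struct device, e.g. inside a device-model callback.
 */
static inline sector_t example_bdev_capacity(struct device *dev)
{
	struct block_device *bdev = dev_to_bdev(dev);

	return bdev->bd_nr_sectors;	/* size in 512-byte sectors */
}
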
/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL	((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

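/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * a failover driver might use blk_path_error() to decide whether retrying a
 * failed request on another path can possibly succeed.
 */
static inline bool example_should_failover(blk_status_t status)
{
	return status != BLK_STS_OK && blk_path_error(status);
}
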
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	u8			bi_write_stream;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	/* Time that this bio was issued. */
	u64			issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;
};

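/*
 * Illustrative sketch only (hypothetical handler, not part of this header):
 * ->bi_end_io runs once when the bio completes; ->bi_status holds the result
 * and ->bi_private carries caller context.
 */
static inline void example_bio_end_io(struct bio *bio)
{
	blk_status_t *result = bio->bi_private;

	/* propagate the completion status to the submitter's variable */
	*result = bio->bi_status;
}
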
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
	return (struct bio_vec *)(bio + 1);
}

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	/*
	 * This bio has completed bps throttling at the single tg granularity,
	 * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
	 * into the sq->queued of the upper tg, or is about to be dispatched,
	 * this flag needs to be cleared. Since blk-throttle and rq_qos are not
	 * on the same hierarchical level, reuse the value.
	 */
	BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};

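/*
 * Illustrative sketch only (hypothetical helper): the values above are bit
 * numbers within bio->bi_flags, so testing a flag is a plain bit check
 * (linux/bio.h wraps this as bio_flagged()).
 */
static inline bool example_bio_is_cloned(const struct bio *bio)
{
	return bio->bi_flags & (1U << BIO_CLONED);
}
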
typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	__REQ_P2PDMA,		/* contains P2P DMA pages */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)
#define REQ_P2PDMA	(__force blk_opf_t)(1ULL << __REQ_P2PDMA)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

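/*
 * Illustrative sketch only (hypothetical helper): one REQ_OP_* value and any
 * number of REQ_* modifier flags combine into a single blk_opf_t, e.g. a
 * synchronous write that also forces unit access.
 */
static inline blk_opf_t example_sync_fua_write_opf(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}
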
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */