/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;
struct elevator_tags;

/*
 * Default upper limit for the software max_sectors limit used for regular I/Os.
 * This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)
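/*
 * Worked example (illustrative): with SECTOR_SHIFT == 9 this evaluates to
 * 4 MiB / 512 B = 8192 sectors, i.e. regular I/O is capped at 4 MiB per
 * request unless the limit is raised via
 * /sys/block/<dev>/queue/max_sectors_kb.
 */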

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
	struct rcu_head		rcu_head;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
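/*
 * Worked example for the boundary check above (illustrative numbers): with a
 * segment boundary mask of 0xffff (64K), vec1 starting at 0x2f000 with a 4K
 * length and vec2 at 0x30000 with a 4K length are physically contiguous, but
 * (0x2f000 | 0xffff) == 0x2ffff while (0x30fff | 0xffff) == 0x3ffff, so the
 * merged segment would straddle a 64K boundary and the merge is rejected.
 */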

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
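/*
 * Illustrative example: a device with a 4K virtual boundary has
 * virt_boundary_mask == 0xfff.  A previous bvec that ends mid-page
 * (bv_offset + bv_len not a multiple of 4K) or a new bvec that does not
 * start on a 4K boundary (offset & 0xfff != 0) would leave a hole in the
 * SG list, so bvec_gap_to_prev() returns true and the new bvec cannot be
 * added without starting a new bio/request.
 */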

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is handled like a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
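/*
 * Illustrative sketch of case 1) above: a driver that advertises
 * max_discard_segments > 1 walks the (possibly discontiguous) ranges of a
 * merged discard request one bio at a time, e.g.:
 *
 *	__rq_for_each_bio(bio, req) {
 *		u64 start = bio->bi_iter.bi_sector;
 *		u32 nr_sects = bio_sectors(bio);
 *
 *		(add start/nr_sects as one range of the controller command)
 *	}
 *
 * The exact command building is of course driver specific.
 */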

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
		struct elevator_tags *t);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the extra pass through the split code doesn't matter
 * anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	if (lim->chunk_sectors)
		return true;
	if (bio->bi_vcnt != 1)
		return true;
	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
		lim->min_segment_size;
}
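/*
 * Illustrative example: assuming lim->min_segment_size is
 * BLK_MIN_SEGMENT_SIZE (4096) and chunk_sectors is 0, a bio carrying a single
 * bvec with bv_offset == 512 and bv_len == 3072 spans at most
 * 512 + 3072 = 3584 bytes of one page, so bio_may_need_split() returns false
 * and __bio_split_to_limits() can skip the full split/segment-count pass.
 */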

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 * @lim: queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
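/*
 * Minimal caller sketch (illustrative, not the exact blk-mq code): the
 * submission path splits before turning the bio into a request, and the
 * split helpers may complete the bio with an error and return NULL:
 *
 *	unsigned int nr_segs;
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;		(the bio was already ended on error)
 *	(allocate a request and map the nr_segs segments)
 */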

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
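/*
 * Worked example (illustrative numbers): with seg_boundary_mask == 0xffff
 * (64K boundary), max_segment_size == 65536 and paddr == 0x1000f000, the
 * distance to the boundary is 0xffff - 0xf000 = 0x0fff, and
 * min(0x0fff, 0xffff) + 1 == 4096, so at most min(len, 4096) bytes fit in
 * this segment before a new one must start at the 64K boundary.
 */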

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
		struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
	       bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
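/*
 * How the check above works (worked numbers): the unsigned addition wraps, so
 * a refcount of 0 gives 0 + 127 = 127 (<= 127, caught), a refcount that has
 * underflowed to -1 gives UINT_MAX + 127 = 126 (caught), while any sane
 * positive value, e.g. 1, gives 128 (> 127, not flagged).
 */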

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
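/*
 * Typical usage sketch (illustrative): a path that may race with completion,
 * such as the timeout handler, first grabs a reference and only touches the
 * request if that succeeds, then drops it again:
 *
 *	if (!req_ref_inc_not_zero(rq))
 *		return;			(request already freed or recycled)
 *	... inspect or time out rq ...
 *	if (req_ref_put_and_test(rq))
 *		(last reference dropped: the request can now be freed)
 */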

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */