/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/ucopysize.h>
#include <uapi/linux/uio.h>

struct page;
struct folio_queue;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_FOLIOQ,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				const struct folio_queue *folioq;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		u8 folioq_slot;
		loff_t xarray_start;
	};
};

typedef __u16 uio_meta_flags_t;

struct uio_meta {
	uio_meta_flags_t	flags;
	u16			app_tag;
	u64			seed;
	struct iov_iter		iter;
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)

static inline size_t iter_iov_len(const struct iov_iter *i)
{
	if (i->iter_type == ITER_UBUF)
		return i->count;
	return iter_iov(i)->iov_len - i->iov_offset;
}
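
/*
 * Usage sketch for the accessors above: walk a user-backed iterator one
 * segment at a time.  process_segment() is a hypothetical helper standing
 * in for whatever per-segment work the caller does; it returns the number
 * of bytes it actually consumed.
 *
 *	while (iov_iter_count(i)) {
 *		void __user *p = iter_iov_addr(i);
 *		size_t len = iter_iov_len(i);
 *
 *		len = process_segment(p, len);
 *		iov_iter_advance(i, len);
 *	}
 */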

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_folioq(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_FOLIOQ;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
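
/*
 * Worked example (sketch): for two already-validated segments of 8 and
 * 24 bytes, iov_length(iov, 2) returns 32.  buf0 and buf1 stand for user
 * pointers whose lengths have already passed validation (e.g. via
 * import_iovec()), so the sum cannot wrap.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = 8 },
 *		{ .iov_base = buf1, .iov_len = 24 },
 *	};
 *	size_t total = iov_length(iov, 2);	// 32 bytes
 */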

void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);
size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
				   size_t bytes, struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

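/*
 * Usage sketch for the fault-in helpers above: prefault the user pages
 * backing a source iterator before entering a section that cannot take
 * page faults, as buffered write paths do.  The helpers return the number
 * of bytes that could NOT be faulted in, so a return value equal to the
 * requested size means no forward progress is possible.
 *
 *	if (fault_in_iov_iter_readable(from, bytes) == bytes)
 *		return -EFAULT;		/* nothing was accessible */
 */
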
static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
					  size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
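
/*
 * Usage sketch for copy_from_iter_full(): pull a fixed-size header out of
 * a source iterator, e.g. at the start of a ->write_iter() style handler.
 * struct foo_header is a made-up structure used purely for illustration.
 * On a short copy the helper has already reverted the iterator, so the
 * caller can simply fail.
 *
 *	struct foo_header hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */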

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
			  const struct folio_queue *folioq,
			  unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
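
/*
 * Usage sketch for the constructors above: wrap a kernel buffer in a
 * single-segment kvec iterator that acts as the destination of a copy
 * (ITER_DEST), then fill it with copy_to_iter().  "buf", "len" and "src"
 * are assumed to describe valid kernel memory.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, ITER_DEST, &kv, 1, len);
 *	if (copy_to_iter(src, len, &to) != len)
 *		return -EFAULT;
 */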

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
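
/*
 * Usage sketch: build an iterator over a single user buffer, as a
 * read()-style path would (data flows towards userspace, so the buffer is
 * the destination).  import_ubuf() additionally validates the (buf, len)
 * pair, whereas iov_iter_ubuf() trusts the caller.  "ubuf" and "len" are
 * assumed to come straight from the syscall arguments.
 *
 *	struct iov_iter to;
 *	int ret;
 *
 *	ret = import_ubuf(ITER_DEST, ubuf, len, &to);
 *	if (ret)
 *		return ret;
 */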
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
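
/*
 * Usage sketch: extract up to one page from an iterator for DMA-style use
 * and release it correctly afterwards.  Whether the page needs unpinning
 * depends on iov_iter_extract_will_pin(); pages[], ppages and "off" are
 * local variables of the (hypothetical) caller.
 *
 *	struct page *pages[1], **ppages = pages;
 *	size_t off;
 *	ssize_t got;
 *
 *	got = iov_iter_extract_pages(i, &ppages, PAGE_SIZE, 1, 0, &off);
 *	if (got > 0) {
 *		// ... use pages[0] starting at offset "off" ...
 *		if (iov_iter_extract_will_pin(i))
 *			unpin_user_page(pages[0]);
 *	}
 */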

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif