1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/ext4/ioctl.c
4 *
5 * Copyright (C) 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 */
10
11#include <linux/fs.h>
12#include <linux/capability.h>
13#include <linux/time.h>
14#include <linux/compat.h>
15#include <linux/mount.h>
16#include <linux/file.h>
17#include <linux/quotaops.h>
18#include <linux/random.h>
19#include <linux/uaccess.h>
20#include <linux/delay.h>
21#include <linux/iversion.h>
22#include <linux/fileattr.h>
23#include <linux/uuid.h>
24#include "ext4_jbd2.h"
25#include "ext4.h"
26#include <linux/fsmap.h>
27#include "fsmap.h"
28#include <trace/events/ext4.h>
29
30typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi,
31 struct ext4_super_block *es,
32 const void *arg);
33
34/*
35 * Superblock modification callback function for changing file system
36 * label
37 */
38static void ext4_sb_setlabel(struct ext4_sb_info *sbi,
39 struct ext4_super_block *es, const void *arg)
40{
41 /* Sanity check, this should never happen */
42 BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX);
43
44 memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
45}
46
47/*
48 * Superblock modification callback function for changing file system
49 * UUID.
50 */
51static void ext4_sb_setuuid(struct ext4_sb_info *sbi,
52 struct ext4_super_block *es, const void *arg)
53{
54 memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
55}
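
/*
 * Both callbacks above are meant to be handed to ext4_update_superblocks_fn()
 * together with the new value, for example (illustrative only):
 *
 *	ext4_update_superblocks_fn(sb, ext4_sb_setlabel, new_label);
 *
 * They are invoked with the superblock buffer locked, so they should only
 * modify fields of the in-buffer ext4_super_block.
 */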
56
57static
58int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
59 ext4_update_sb_callback func,
60 const void *arg)
61{
62 int err = 0;
63 struct ext4_sb_info *sbi = EXT4_SB(sb);
64 struct buffer_head *bh = sbi->s_sbh;
65 struct ext4_super_block *es = sbi->s_es;
66
67 trace_ext4_update_sb(sb, bh->b_blocknr, 1);
68
69 BUFFER_TRACE(bh, "get_write_access");
70 err = ext4_journal_get_write_access(handle, sb,
71 bh,
72 EXT4_JTR_NONE);
73 if (err)
74 goto out_err;
75
76 lock_buffer(bh);
77 func(sbi, es, arg);
78 ext4_superblock_csum_set(sb);
79 unlock_buffer(bh);
80
81 if (buffer_write_io_error(bh) || !buffer_uptodate(bh)) {
82 ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
83 "superblock detected");
84 clear_buffer_write_io_error(bh);
85 set_buffer_uptodate(bh);
86 }
87
88 err = ext4_handle_dirty_metadata(handle, NULL, bh);
89 if (err)
90 goto out_err;
91 err = sync_dirty_buffer(bh);
92out_err:
93 ext4_std_error(sb, err);
94 return err;
95}
96
97/*
98 * Update one backup superblock in the group 'grp' using the callback
99 * function 'func' and argument 'arg'. If the handle is NULL the
100 * modification is not journalled.
101 *
102 * Returns: 0 when no modification was done (no superblock in the group)
103 * 1 when the modification was successful
104 * <0 on error
105 */
106static int ext4_update_backup_sb(struct super_block *sb,
107 handle_t *handle, ext4_group_t grp,
108 ext4_update_sb_callback func, const void *arg)
109{
110 int err = 0;
111 ext4_fsblk_t sb_block;
112 struct buffer_head *bh;
113 unsigned long offset = 0;
114 struct ext4_super_block *es;
115
116 if (!ext4_bg_has_super(sb, grp))
117 return 0;
118
119 /*
120 * For group 0 there is always 1k of padding, so we have to adjust
121 * either the offset or sb_block, depending on the blocksize.
122 */
123 if (grp == 0) {
124 sb_block = 1 * EXT4_MIN_BLOCK_SIZE;
125 offset = do_div(sb_block, sb->s_blocksize);
126 } else {
127 sb_block = ext4_group_first_block_no(sb, grp);
128 offset = 0;
129 }
130
131 trace_ext4_update_sb(sb, sb_block, handle ? 1 : 0);
132
133 bh = ext4_sb_bread(sb, sb_block, 0);
134 if (IS_ERR(bh))
135 return PTR_ERR(bh);
136
137 if (handle) {
138 BUFFER_TRACE(bh, "get_write_access");
139 err = ext4_journal_get_write_access(handle, sb,
140 bh,
141 EXT4_JTR_NONE);
142 if (err)
143 goto out_bh;
144 }
145
146 es = (struct ext4_super_block *) (bh->b_data + offset);
147 lock_buffer(bh);
148 if (ext4_has_feature_metadata_csum(sb) &&
149 es->s_checksum != ext4_superblock_csum(es)) {
150 ext4_msg(sb, KERN_ERR, "Invalid checksum for backup "
151 "superblock %llu", sb_block);
152 unlock_buffer(bh);
153 goto out_bh;
154 }
155 func(EXT4_SB(sb), es, arg);
156 if (ext4_has_feature_metadata_csum(sb))
157 es->s_checksum = ext4_superblock_csum(es);
158 set_buffer_uptodate(bh);
159 unlock_buffer(bh);
160
161 if (handle) {
162 err = ext4_handle_dirty_metadata(handle, NULL, bh);
163 if (err)
164 goto out_bh;
165 } else {
166 BUFFER_TRACE(bh, "marking dirty");
167 mark_buffer_dirty(bh);
168 }
169 err = sync_dirty_buffer(bh);
170
171out_bh:
172 brelse(bh);
173 ext4_std_error(sb, err);
174 return (err) ? err : 1;
175}
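
/*
 * Condensed sketch of how the return contract above is consumed (see
 * ext4_update_superblocks_fn() below): the caller accumulates the number of
 * backups actually written and only treats negative values as failures, e.g.
 *
 *	ret = ext4_update_backup_sb(sb, handle, grp, func, arg);
 *	if (ret < 0)
 *		goto out;	// error
 *	i += ret;		// 0 = no superblock in this group, 1 = updated
 */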
176
177/*
178 * Update primary and backup superblocks using the provided function
179 * func and argument arg.
180 *
181 * Only the primary superblock and at most two backup superblock
182 * modifications are journalled; the rest is modified without journal.
183 * This is safe because e2fsck will re-write them if there is a problem,
184 * and we're very unlikely to ever need more than two backups.
185 */
186static
187int ext4_update_superblocks_fn(struct super_block *sb,
188 ext4_update_sb_callback func,
189 const void *arg)
190{
191 handle_t *handle;
192 ext4_group_t ngroups;
193 unsigned int three = 1;
194 unsigned int five = 5;
195 unsigned int seven = 7;
196 int err = 0, ret, i;
197 ext4_group_t grp, primary_grp;
198 struct ext4_sb_info *sbi = EXT4_SB(sb);
199
200 /*
201 * We can't update superblocks while the online resize is running
202 */
203 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
204 &sbi->s_ext4_flags)) {
205 ext4_msg(sb, KERN_ERR, "Can't modify superblock while "
206 "performing online resize");
207 return -EBUSY;
208 }
209
210 /*
211 * We're only going to update primary superblock and two
212 * backup superblocks in this transaction.
213 */
214 handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 3);
215 if (IS_ERR(handle)) {
216 err = PTR_ERR(handle);
217 goto out;
218 }
219
220 /* Update primary superblock */
221 err = ext4_update_primary_sb(sb, handle, func, arg);
222 if (err) {
223 ext4_msg(sb, KERN_ERR, "Failed to update primary "
224 "superblock");
225 goto out_journal;
226 }
227
228 primary_grp = ext4_get_group_number(sb, sbi->s_sbh->b_blocknr);
229 ngroups = ext4_get_groups_count(sb);
230
231 /*
232 * Update backup superblocks. We have to start from group 0
233 * because it might not be where the primary superblock is
234 * if the fs is mounted with -o sb=<backup_sb_block>
235 */
236 i = 0;
237 grp = 0;
238 while (grp < ngroups) {
239 /* Skip primary superblock */
240 if (grp == primary_grp)
241 goto next_grp;
242
243 ret = ext4_update_backup_sb(sb, handle, grp, func, arg);
244 if (ret < 0) {
245 /* Ignore bad checksum; try to update next sb */
246 if (ret == -EFSBADCRC)
247 goto next_grp;
248 err = ret;
249 goto out_journal;
250 }
251
252 i += ret;
253 if (handle && i > 1) {
254 /*
255 * We're only journalling primary superblock and
256 * two backup superblocks; the rest is not
257 * journalled.
258 */
259 err = ext4_journal_stop(handle);
260 if (err)
261 goto out;
262 handle = NULL;
263 }
264next_grp:
265 grp = ext4_list_backups(sb, &three, &five, &seven);
266 }
267
268out_journal:
269 if (handle) {
270 ret = ext4_journal_stop(handle);
271 if (ret && !err)
272 err = ret;
273 }
274out:
275 clear_bit_unlock(EXT4_FLAGS_RESIZING, &sbi->s_ext4_flags);
276 smp_mb__after_atomic();
277 return err ? err : 0;
278}
279
280/*
281 * Swap memory between @a and @b for @len bytes.
282 *
283 * @a: pointer to first memory area
284 * @b: pointer to second memory area
285 * @len: number of bytes to swap
286 *
287 */
288static void memswap(void *a, void *b, size_t len)
289{
290 unsigned char *ap, *bp;
291
292 ap = (unsigned char *)a;
293 bp = (unsigned char *)b;
294 while (len-- > 0) {
295 swap(*ap, *bp);
296 ap++;
297 bp++;
298 }
299}
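
/*
 * Typical use, as below in swap_inode_data(): exchanging two equally sized
 * on-disk arrays in place, e.g.
 *
 *	memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
 */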
300
301/*
302 * Swap i_data and associated attributes between @inode1 and @inode2.
303 * This function is used for the primary swap between inode1 and inode2
304 * and also to revert this primary swap in case of errors.
305 *
306 * Therefore you have to make sure that calling this function twice
307 * reverts all changes.
308 *
309 * @inode1: pointer to first inode
310 * @inode2: pointer to second inode
311 */
312static void swap_inode_data(struct inode *inode1, struct inode *inode2)
313{
314 loff_t isize;
315 struct ext4_inode_info *ei1;
316 struct ext4_inode_info *ei2;
317 unsigned long tmp;
318 struct timespec64 ts1, ts2;
319
320 ei1 = EXT4_I(inode1);
321 ei2 = EXT4_I(inode2);
322
323 swap(inode1->i_version, inode2->i_version);
324
325 ts1 = inode_get_atime(inode1);
326 ts2 = inode_get_atime(inode2);
327 inode_set_atime_to_ts(inode1, ts2);
328 inode_set_atime_to_ts(inode2, ts1);
329
330 ts1 = inode_get_mtime(inode1);
331 ts2 = inode_get_mtime(inode2);
332 inode_set_mtime_to_ts(inode1, ts2);
333 inode_set_mtime_to_ts(inode2, ts1);
334
335 memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
336 tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
337 ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
338 (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
339 ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
340 swap(ei1->i_disksize, ei2->i_disksize);
341 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
342 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
343
344 isize = i_size_read(inode1);
345 i_size_write(inode1, i_size_read(inode2));
346 i_size_write(inode2, isize);
347}
348
349void ext4_reset_inode_seed(struct inode *inode)
350{
351 struct ext4_inode_info *ei = EXT4_I(inode);
352 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
353 __le32 inum = cpu_to_le32(inode->i_ino);
354 __le32 gen = cpu_to_le32(inode->i_generation);
355 __u32 csum;
356
357 if (!ext4_has_feature_metadata_csum(inode->i_sb))
358 return;
359
360 csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
361 ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
362}
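
/*
 * In other words (informal sketch of the computation above): with
 * metadata_csum enabled, the per-inode checksum seed becomes
 *
 *	i_csum_seed = crc32c(crc32c(s_csum_seed, inode_number), generation)
 *
 * so it has to be recomputed whenever i_generation changes, as in the
 * boot-loader swap below.
 */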
363
364/*
365 * Swap the information from the given @inode and the inode
366 * EXT4_BOOT_LOADER_INO. It swaps i_data and the other important
367 * fields of the two inodes.
368 *
369 * @sb: the super block of the filesystem
370 * @idmap: idmap of the mount the inode was found from
371 * @inode: the inode to swap with EXT4_BOOT_LOADER_INO
372 *
373 */
374static long swap_inode_boot_loader(struct super_block *sb,
375 struct mnt_idmap *idmap,
376 struct inode *inode)
377{
378 handle_t *handle;
379 int err;
380 struct inode *inode_bl;
381 struct ext4_inode_info *ei_bl;
382 qsize_t size, size_bl, diff;
383 blkcnt_t blocks;
384 unsigned short bytes;
385
386 inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
387 EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
388 if (IS_ERR(inode_bl))
389 return PTR_ERR(inode_bl);
390 ei_bl = EXT4_I(inode_bl);
391
392 /* Protect the original inodes against truncation and make sure that
393 * only one swap_inode_boot_loader is running. */
394 lock_two_nondirectories(inode, inode_bl);
395
396 if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
397 IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
398 (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
399 ext4_has_inline_data(inode)) {
400 err = -EINVAL;
401 goto journal_err_out;
402 }
403
404 if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
405 !inode_owner_or_capable(idmap, inode) ||
406 !capable(CAP_SYS_ADMIN)) {
407 err = -EPERM;
408 goto journal_err_out;
409 }
410
411 filemap_invalidate_lock(inode->i_mapping);
412 err = filemap_write_and_wait(inode->i_mapping);
413 if (err)
414 goto err_out;
415
416 err = filemap_write_and_wait(inode_bl->i_mapping);
417 if (err)
418 goto err_out;
419
420 /* Wait for all existing dio workers */
421 inode_dio_wait(inode);
422 inode_dio_wait(inode_bl);
423
424 truncate_inode_pages(&inode->i_data, 0);
425 truncate_inode_pages(&inode_bl->i_data, 0);
426
427 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
428 if (IS_ERR(handle)) {
429 err = -EINVAL;
430 goto err_out;
431 }
432 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
433
434 /* Protect extent tree against block allocations via delalloc */
435 ext4_double_down_write_data_sem(inode, inode_bl);
436
437 if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
438 /* this inode has never been used as a BOOT_LOADER */
439 set_nlink(inode_bl, 1);
440 i_uid_write(inode_bl, 0);
441 i_gid_write(inode_bl, 0);
442 inode_bl->i_flags = 0;
443 ei_bl->i_flags = 0;
444 inode_set_iversion(inode_bl, 1);
445 i_size_write(inode_bl, 0);
446 EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
447 inode_bl->i_mode = S_IFREG;
448 if (ext4_has_feature_extents(sb)) {
449 ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
450 ext4_ext_tree_init(handle, inode_bl);
451 } else
452 memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
453 }
454
455 err = dquot_initialize(inode);
456 if (err)
457 goto err_out1;
458
459 size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
460 size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
461 diff = size - size_bl;
462 swap_inode_data(inode, inode_bl);
463
464 inode_set_ctime_current(inode);
465 inode_set_ctime_current(inode_bl);
466 inode_inc_iversion(inode);
467
468 inode->i_generation = get_random_u32();
469 inode_bl->i_generation = get_random_u32();
470 ext4_reset_inode_seed(inode);
471 ext4_reset_inode_seed(inode_bl);
472
473 ext4_discard_preallocations(inode);
474
475 err = ext4_mark_inode_dirty(handle, inode);
476 if (err < 0) {
477 /* No need to update quota information. */
478 ext4_warning(inode->i_sb,
479 "couldn't mark inode #%lu dirty (err %d)",
480 inode->i_ino, err);
481 /* Revert all changes: */
482 swap_inode_data(inode, inode_bl);
483 ext4_mark_inode_dirty(handle, inode);
484 goto err_out1;
485 }
486
487 blocks = inode_bl->i_blocks;
488 bytes = inode_bl->i_bytes;
489 inode_bl->i_blocks = inode->i_blocks;
490 inode_bl->i_bytes = inode->i_bytes;
491 err = ext4_mark_inode_dirty(handle, inode_bl);
492 if (err < 0) {
493 /* No need to update quota information. */
494 ext4_warning(inode_bl->i_sb,
495 "couldn't mark inode #%lu dirty (err %d)",
496 inode_bl->i_ino, err);
497 goto revert;
498 }
499
500 /* Bootloader inode should not be counted in quota information. */
501 if (diff > 0)
502 dquot_free_space(inode, diff);
503 else
504 err = dquot_alloc_space(inode, -1 * diff);
505
506 if (err < 0) {
507revert:
508 /* Revert all changes: */
509 inode_bl->i_blocks = blocks;
510 inode_bl->i_bytes = bytes;
511 swap_inode_data(inode, inode_bl);
512 ext4_mark_inode_dirty(handle, inode);
513 ext4_mark_inode_dirty(handle, inode_bl);
514 }
515
516err_out1:
517 ext4_journal_stop(handle);
518 ext4_double_up_write_data_sem(inode, inode_bl);
519
520err_out:
521 filemap_invalidate_unlock(inode->i_mapping);
522journal_err_out:
523 unlock_two_nondirectories(inode, inode_bl);
524 iput(inode_bl);
525 return err;
526}
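
/*
 * Hypothetical user-space trigger for the swap above (illustrative only; the
 * path is made up and error handling is omitted):
 *
 *	int fd = open("/mnt/new-bootloader", O_RDWR);
 *
 *	ioctl(fd, EXT4_IOC_SWAP_BOOT);
 */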
527
528/*
529 * If immutable is set and we are not clearing it, we're not allowed to change
530 * anything else in the inode. Don't error out if we're only trying to set
531 * immutable on an immutable file.
532 */
533static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
534 unsigned int flags)
535{
536 struct ext4_inode_info *ei = EXT4_I(inode);
537 unsigned int oldflags = ei->i_flags;
538
539 if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
540 return 0;
541
542 if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
543 return -EPERM;
544 if (ext4_has_feature_project(inode->i_sb) &&
545 __kprojid_val(ei->i_projid) != new_projid)
546 return -EPERM;
547
548 return 0;
549}
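
/*
 * For example (illustrative): with EXT4_IMMUTABLE_FL already set, a request
 * that keeps the flag set may not change any other flag or the project ID;
 * clearing EXT4_IMMUTABLE_FL in the same request is what allows further
 * changes to go through.
 */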
550
551static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
552{
553 struct ext4_inode_info *ei = EXT4_I(inode);
554
555 if (S_ISDIR(inode->i_mode))
556 return;
557
558 if (test_opt2(inode->i_sb, DAX_NEVER) ||
559 test_opt(inode->i_sb, DAX_ALWAYS))
560 return;
561
562 if ((ei->i_flags ^ flags) & EXT4_DAX_FL)
563 d_mark_dontcache(inode);
564}
565
566static bool dax_compatible(struct inode *inode, unsigned int oldflags,
567 unsigned int flags)
568{
569 /* Allow the DAX flag to be changed on inline directories */
570 if (S_ISDIR(inode->i_mode)) {
571 flags &= ~EXT4_INLINE_DATA_FL;
572 oldflags &= ~EXT4_INLINE_DATA_FL;
573 }
574
575 if (flags & EXT4_DAX_FL) {
576 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
577 ext4_test_inode_state(inode,
578 bit: EXT4_STATE_VERITY_IN_PROGRESS)) {
579 return false;
580 }
581 }
582
583 if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL))
584 return false;
585
586 return true;
587}
588
589static int ext4_ioctl_setflags(struct inode *inode,
590 unsigned int flags)
591{
592 struct ext4_inode_info *ei = EXT4_I(inode);
593 handle_t *handle = NULL;
594 int err = -EPERM, migrate = 0;
595 struct ext4_iloc iloc;
596 unsigned int oldflags, mask, i;
597 struct super_block *sb = inode->i_sb;
598
599 /* Is it a quota file? Do not allow users to mess with it. */
600 if (ext4_is_quota_file(inode))
601 goto flags_out;
602
603 oldflags = ei->i_flags;
604 /*
605 * The JOURNAL_DATA flag can only be changed by a process with
606 * the CAP_SYS_RESOURCE capability.
607 */
608 if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
609 if (!capable(CAP_SYS_RESOURCE))
610 goto flags_out;
611 }
612
613 if (!dax_compatible(inode, oldflags, flags)) {
614 err = -EOPNOTSUPP;
615 goto flags_out;
616 }
617
618 if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
619 migrate = 1;
620
621 if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
622 if (!ext4_has_feature_casefold(sb)) {
623 err = -EOPNOTSUPP;
624 goto flags_out;
625 }
626
627 if (!S_ISDIR(inode->i_mode)) {
628 err = -ENOTDIR;
629 goto flags_out;
630 }
631
632 if (!ext4_empty_dir(inode)) {
633 err = -ENOTEMPTY;
634 goto flags_out;
635 }
636 }
637
638 /*
639 * Wait for all pending directio and then flush all the dirty pages
640 * for this file. The flush marks all the pages readonly, so any
641 * subsequent attempt to write to the file (particularly mmap pages)
642 * will come through the filesystem and fail.
643 */
644 if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
645 (flags & EXT4_IMMUTABLE_FL)) {
646 inode_dio_wait(inode);
647 err = filemap_write_and_wait(mapping: inode->i_mapping);
648 if (err)
649 goto flags_out;
650 }
651
652 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
653 if (IS_ERR(ptr: handle)) {
654 err = PTR_ERR(ptr: handle);
655 goto flags_out;
656 }
657 if (IS_SYNC(inode))
658 ext4_handle_sync(handle);
659 err = ext4_reserve_inode_write(handle, inode, iloc: &iloc);
660 if (err)
661 goto flags_err;
662
663 ext4_dax_dontcache(inode, flags);
664
665 for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
666 if (!(mask & EXT4_FL_USER_MODIFIABLE))
667 continue;
668 /* These flags get special treatment later */
669 if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
670 continue;
671 if (mask & flags)
672 ext4_set_inode_flag(inode, bit: i);
673 else
674 ext4_clear_inode_flag(inode, bit: i);
675 }
676
677 ext4_set_inode_flags(inode, init: false);
678
679 inode_set_ctime_current(inode);
680 inode_inc_iversion(inode);
681
682 err = ext4_mark_iloc_dirty(handle, inode, iloc: &iloc);
683flags_err:
684 ext4_journal_stop(handle);
685 if (err)
686 goto flags_out;
687
688 if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
689 /*
690 * Changes to the journaling mode can cause unsafe changes to
691 * S_DAX if the inode is DAX
692 */
693 if (IS_DAX(inode)) {
694 err = -EBUSY;
695 goto flags_out;
696 }
697
698 err = ext4_change_inode_journal_flag(inode,
699 flags & EXT4_JOURNAL_DATA_FL);
700 if (err)
701 goto flags_out;
702 }
703 if (migrate) {
704 if (flags & EXT4_EXTENTS_FL)
705 err = ext4_ext_migrate(inode);
706 else
707 err = ext4_ind_migrate(inode);
708 }
709
710flags_out:
711 return err;
712}
713
714#ifdef CONFIG_QUOTA
715static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
716{
717 struct super_block *sb = inode->i_sb;
718 struct ext4_inode_info *ei = EXT4_I(inode);
719 int err, rc;
720 handle_t *handle;
721 kprojid_t kprojid;
722 struct ext4_iloc iloc;
723 struct ext4_inode *raw_inode;
724 struct dquot *transfer_to[MAXQUOTAS] = { };
725
726 if (!ext4_has_feature_project(sb)) {
727 if (projid != EXT4_DEF_PROJID)
728 return -EOPNOTSUPP;
729 else
730 return 0;
731 }
732
733 if (EXT4_INODE_SIZE(sb) <= EXT4_GOOD_OLD_INODE_SIZE)
734 return -EOPNOTSUPP;
735
736 kprojid = make_kprojid(from: &init_user_ns, projid: (projid_t)projid);
737
738 if (projid_eq(left: kprojid, EXT4_I(inode)->i_projid))
739 return 0;
740
741 err = -EPERM;
742 /* Is it a quota file? Do not allow users to mess with it. */
743 if (ext4_is_quota_file(inode))
744 return err;
745
746 err = dquot_initialize(inode);
747 if (err)
748 return err;
749
750 err = ext4_get_inode_loc(inode, &iloc);
751 if (err)
752 return err;
753
754 raw_inode = ext4_raw_inode(iloc: &iloc);
755 if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
756 err = ext4_expand_extra_isize(inode,
757 new_extra_isize: EXT4_SB(sb)->s_want_extra_isize,
758 iloc: &iloc);
759 if (err)
760 return err;
761 } else {
762 brelse(bh: iloc.bh);
763 }
764
765 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
766 EXT4_QUOTA_INIT_BLOCKS(sb) +
767 EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
768 if (IS_ERR(ptr: handle))
769 return PTR_ERR(ptr: handle);
770
771 err = ext4_reserve_inode_write(handle, inode, iloc: &iloc);
772 if (err)
773 goto out_stop;
774
775 transfer_to[PRJQUOTA] = dqget(sb, qid: make_kqid_projid(projid: kprojid));
776 if (!IS_ERR(ptr: transfer_to[PRJQUOTA])) {
777
778 /* __dquot_transfer() calls back ext4_get_inode_usage() which
779 * counts xattr inode references.
780 */
781 down_read(sem: &EXT4_I(inode)->xattr_sem);
782 err = __dquot_transfer(inode, transfer_to);
783 up_read(sem: &EXT4_I(inode)->xattr_sem);
784 dqput(dquot: transfer_to[PRJQUOTA]);
785 if (err)
786 goto out_dirty;
787 }
788
789 EXT4_I(inode)->i_projid = kprojid;
790 inode_set_ctime_current(inode);
791 inode_inc_iversion(inode);
792out_dirty:
793 rc = ext4_mark_iloc_dirty(handle, inode, iloc: &iloc);
794 if (!err)
795 err = rc;
796out_stop:
797 ext4_journal_stop(handle);
798 return err;
799}
800#else
801static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
802{
803 if (projid != EXT4_DEF_PROJID)
804 return -EOPNOTSUPP;
805 return 0;
806}
807#endif
808
809int ext4_force_shutdown(struct super_block *sb, u32 flags)
810{
811 struct ext4_sb_info *sbi = EXT4_SB(sb);
812 int ret;
813
814 if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH)
815 return -EINVAL;
816
817 if (ext4_forced_shutdown(sb))
818 return 0;
819
820 ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags);
821 trace_ext4_shutdown(sb, flags);
822
823 switch (flags) {
824 case EXT4_GOING_FLAGS_DEFAULT:
825 ret = bdev_freeze(bdev: sb->s_bdev);
826 if (ret)
827 return ret;
828 set_bit(nr: EXT4_FLAGS_SHUTDOWN, addr: &sbi->s_ext4_flags);
829 bdev_thaw(bdev: sb->s_bdev);
830 break;
831 case EXT4_GOING_FLAGS_LOGFLUSH:
832 set_bit(nr: EXT4_FLAGS_SHUTDOWN, addr: &sbi->s_ext4_flags);
833 if (sbi->s_journal && !is_journal_aborted(journal: sbi->s_journal)) {
834 (void) ext4_force_commit(sb);
835 jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
836 }
837 break;
838 case EXT4_GOING_FLAGS_NOLOGFLUSH:
839 set_bit(nr: EXT4_FLAGS_SHUTDOWN, addr: &sbi->s_ext4_flags);
840 if (sbi->s_journal && !is_journal_aborted(journal: sbi->s_journal))
841 jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
842 break;
843 default:
844 return -EINVAL;
845 }
846 clear_opt(sb, DISCARD);
847 return 0;
848}
849
850static int ext4_ioctl_shutdown(struct super_block *sb, unsigned long arg)
851{
852 u32 flags;
853
854 if (!capable(CAP_SYS_ADMIN))
855 return -EPERM;
856
857 if (get_user(flags, (__u32 __user *)arg))
858 return -EFAULT;
859
860 return ext4_force_shutdown(sb, flags);
861}
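
/*
 * Minimal user-space sketch (illustrative only, error handling omitted):
 * flush the journal and then shut the filesystem down:
 *
 *	__u32 flags = EXT4_GOING_FLAGS_LOGFLUSH;
 *
 *	ioctl(fd, EXT4_IOC_SHUTDOWN, &flags);
 */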
862
863struct getfsmap_info {
864 struct super_block *gi_sb;
865 struct fsmap_head __user *gi_data;
866 unsigned int gi_idx;
867 __u32 gi_last_flags;
868};
869
870static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
871{
872 struct getfsmap_info *info = priv;
873 struct fsmap fm;
874
875 trace_ext4_getfsmap_mapping(info->gi_sb, xfm);
876
877 info->gi_last_flags = xfm->fmr_flags;
878 ext4_fsmap_from_internal(info->gi_sb, &fm, xfm);
879 if (copy_to_user(&info->gi_data->fmh_recs[info->gi_idx++], &fm,
880 sizeof(struct fsmap)))
881 return -EFAULT;
882
883 return 0;
884}
885
886static int ext4_ioc_getfsmap(struct super_block *sb,
887 struct fsmap_head __user *arg)
888{
889 struct getfsmap_info info = { NULL };
890 struct ext4_fsmap_head xhead = {0};
891 struct fsmap_head head;
892 bool aborted = false;
893 int error;
894
895 if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
896 return -EFAULT;
897 if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
898 memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
899 sizeof(head.fmh_keys[0].fmr_reserved)) ||
900 memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
901 sizeof(head.fmh_keys[1].fmr_reserved)))
902 return -EINVAL;
903 /*
904 * ext4 doesn't report file extents at all, so the only valid
905 * file offsets are the magic ones (all zeroes or all ones).
906 */
907 if (head.fmh_keys[0].fmr_offset ||
908 (head.fmh_keys[1].fmr_offset != 0 &&
909 head.fmh_keys[1].fmr_offset != -1ULL))
910 return -EINVAL;
911
912 xhead.fmh_iflags = head.fmh_iflags;
913 xhead.fmh_count = head.fmh_count;
914 ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]);
915 ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]);
916
917 trace_ext4_getfsmap_low_key(sb, &xhead.fmh_keys[0]);
918 trace_ext4_getfsmap_high_key(sb, &xhead.fmh_keys[1]);
919
920 info.gi_sb = sb;
921 info.gi_data = arg;
922 error = ext4_getfsmap(sb, &xhead, ext4_getfsmap_format, &info);
923 if (error == EXT4_QUERY_RANGE_ABORT)
924 aborted = true;
925 else if (error)
926 return error;
927
928 /* If we didn't abort, set the "last" flag in the last fmx */
929 if (!aborted && info.gi_idx) {
930 info.gi_last_flags |= FMR_OF_LAST;
931 if (copy_to_user(to: &info.gi_data->fmh_recs[info.gi_idx - 1].fmr_flags,
932 from: &info.gi_last_flags,
933 n: sizeof(info.gi_last_flags)))
934 return -EFAULT;
935 }
936
937 /* copy back header */
938 head.fmh_entries = xhead.fmh_entries;
939 head.fmh_oflags = xhead.fmh_oflags;
940 if (copy_to_user(to: arg, from: &head, n: sizeof(struct fsmap_head)))
941 return -EFAULT;
942
943 return 0;
944}
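
/*
 * Hedged user-space sketch of a whole-filesystem query via FS_IOC_GETFSMAP
 * (assumes <linux/fsmap.h>; buffer sizing and error handling omitted):
 *
 *	struct fsmap_head *head = calloc(1, fsmap_sizeof(32));
 *
 *	head->fmh_count = 32;
 *	head->fmh_keys[1].fmr_device = UINT_MAX;
 *	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_owner = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_offset = -1ULL;
 *	ioctl(fd, FS_IOC_GETFSMAP, head);
 *	// head->fmh_entries records are now available in head->fmh_recs[]
 */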
945
946static long ext4_ioctl_group_add(struct file *file,
947 struct ext4_new_group_data *input)
948{
949 struct super_block *sb = file_inode(f: file)->i_sb;
950 int err, err2=0;
951
952 err = ext4_resize_begin(sb);
953 if (err)
954 return err;
955
956 if (ext4_has_feature_bigalloc(sb)) {
957 ext4_msg(sb, KERN_ERR,
958 "Online resizing not supported with bigalloc");
959 err = -EOPNOTSUPP;
960 goto group_add_out;
961 }
962
963 err = mnt_want_write_file(file);
964 if (err)
965 goto group_add_out;
966
967 err = ext4_group_add(sb, input);
968 if (EXT4_SB(sb)->s_journal) {
969 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
970 err2 = jbd2_journal_flush(journal: EXT4_SB(sb)->s_journal, flags: 0);
971 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
972 }
973 if (err == 0)
974 err = err2;
975 mnt_drop_write_file(file);
976 if (!err && ext4_has_group_desc_csum(sb) &&
977 test_opt(sb, INIT_INODE_TABLE))
978 err = ext4_register_li_request(sb, first_not_zeroed: input->group);
979group_add_out:
980 err2 = ext4_resize_end(sb, update_backups: false);
981 if (err == 0)
982 err = err2;
983 return err;
984}
985
986int ext4_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
987{
988 struct inode *inode = d_inode(dentry);
989 struct ext4_inode_info *ei = EXT4_I(inode);
990 u32 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
991
992 if (S_ISREG(inode->i_mode))
993 flags &= ~FS_PROJINHERIT_FL;
994
995 fileattr_fill_flags(fa, flags);
996 if (ext4_has_feature_project(sb: inode->i_sb))
997 fa->fsx_projid = from_kprojid(to: &init_user_ns, kprojid: ei->i_projid);
998
999 return 0;
1000}
1001
1002int ext4_fileattr_set(struct mnt_idmap *idmap,
1003 struct dentry *dentry, struct file_kattr *fa)
1004{
1005 struct inode *inode = d_inode(dentry);
1006 u32 flags = fa->flags;
1007 int err = -EOPNOTSUPP;
1008
1009 if (flags & ~EXT4_FL_USER_VISIBLE)
1010 goto out;
1011
1012 /*
1013 * chattr(1) grabs flags via GETFLAGS, modifies the result and
1014 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
1015 * more restrictive than just silently masking off visible but
1016 * not settable flags, as we have always done.
1017 */
1018 flags &= EXT4_FL_USER_MODIFIABLE;
1019 if (ext4_mask_flags(mode: inode->i_mode, flags) != flags)
1020 goto out;
1021 err = ext4_ioctl_check_immutable(inode, new_projid: fa->fsx_projid, flags);
1022 if (err)
1023 goto out;
1024 err = ext4_ioctl_setflags(inode, flags);
1025 if (err)
1026 goto out;
1027 err = ext4_ioctl_setproject(inode, projid: fa->fsx_projid);
1028out:
1029 return err;
1030}
1031
1032/* So that the fiemap access checks can't overflow on 32 bit machines. */
1033#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
1034
1035static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
1036{
1037 struct fiemap fiemap;
1038 struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
1039 struct fiemap_extent_info fieinfo = { 0, };
1040 struct inode *inode = file_inode(f: filp);
1041 int error;
1042
1043 if (copy_from_user(to: &fiemap, from: ufiemap, n: sizeof(fiemap)))
1044 return -EFAULT;
1045
1046 if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
1047 return -EINVAL;
1048
1049 fieinfo.fi_flags = fiemap.fm_flags;
1050 fieinfo.fi_extents_max = fiemap.fm_extent_count;
1051 fieinfo.fi_extents_start = ufiemap->fm_extents;
1052
1053 error = ext4_get_es_cache(inode, fieinfo: &fieinfo, start: fiemap.fm_start,
1054 len: fiemap.fm_length);
1055 fiemap.fm_flags = fieinfo.fi_flags;
1056 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
1057 if (copy_to_user(to: ufiemap, from: &fiemap, n: sizeof(fiemap)))
1058 error = -EFAULT;
1059
1060 return error;
1061}
1062
1063static int ext4_ioctl_checkpoint(struct file *filp, unsigned long arg)
1064{
1065 int err = 0;
1066 __u32 flags = 0;
1067 unsigned int flush_flags = 0;
1068 struct super_block *sb = file_inode(f: filp)->i_sb;
1069
1070 if (copy_from_user(to: &flags, from: (__u32 __user *)arg,
1071 n: sizeof(__u32)))
1072 return -EFAULT;
1073
1074 if (!capable(CAP_SYS_ADMIN))
1075 return -EPERM;
1076
1077 /* check for invalid bits set */
1078 if ((flags & ~EXT4_IOC_CHECKPOINT_FLAG_VALID) ||
1079 ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
1080 (flags & JBD2_JOURNAL_FLUSH_ZEROOUT)))
1081 return -EINVAL;
1082
1083 if (!EXT4_SB(sb)->s_journal)
1084 return -ENODEV;
1085
1086 if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) &&
1087 !bdev_max_discard_sectors(bdev: EXT4_SB(sb)->s_journal->j_dev))
1088 return -EOPNOTSUPP;
1089
1090 if (flags & EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
1091 return 0;
1092
1093 if (flags & EXT4_IOC_CHECKPOINT_FLAG_DISCARD)
1094 flush_flags |= JBD2_JOURNAL_FLUSH_DISCARD;
1095
1096 if (flags & EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT) {
1097 flush_flags |= JBD2_JOURNAL_FLUSH_ZEROOUT;
1098 pr_info_ratelimited("warning: checkpointing journal with EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT can be slow");
1099 }
1100
1101 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1102 err = jbd2_journal_flush(journal: EXT4_SB(sb)->s_journal, flags: flush_flags);
1103 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1104
1105 return err;
1106}
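
/*
 * Illustrative user-space call (error handling omitted): checkpoint the
 * journal and discard the journal space afterwards:
 *
 *	__u32 flags = EXT4_IOC_CHECKPOINT_FLAG_DISCARD;
 *
 *	ioctl(fd, EXT4_IOC_CHECKPOINT, &flags);
 */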
1107
1108static int ext4_ioctl_setlabel(struct file *filp, const char __user *user_label)
1109{
1110 size_t len;
1111 int ret = 0;
1112 char new_label[EXT4_LABEL_MAX + 1];
1113 struct super_block *sb = file_inode(f: filp)->i_sb;
1114
1115 if (!capable(CAP_SYS_ADMIN))
1116 return -EPERM;
1117
1118 /*
1119 * Copy one byte more than the maximum ext4 label length so that we
1120 * can find the terminating null byte and thus check whether the label
1121 * is too long. The on-disk label doesn't need to be null terminated.
1122 */
1123 if (copy_from_user(to: new_label, from: user_label, EXT4_LABEL_MAX + 1))
1124 return -EFAULT;
1125
1126 len = strnlen(new_label, EXT4_LABEL_MAX + 1);
1127 if (len > EXT4_LABEL_MAX)
1128 return -EINVAL;
1129
1130 /*
1131 * Clear the buffer after the new label
1132 */
1133 memset(s: new_label + len, c: 0, EXT4_LABEL_MAX - len);
1134
1135 ret = mnt_want_write_file(file: filp);
1136 if (ret)
1137 return ret;
1138
1139 ret = ext4_update_superblocks_fn(sb, func: ext4_sb_setlabel, arg: new_label);
1140
1141 mnt_drop_write_file(file: filp);
1142 return ret;
1143}
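
/*
 * Illustrative user-space usage ("scratch" is just an example label; error
 * handling omitted):
 *
 *	char label[FSLABEL_MAX] = "scratch";
 *
 *	ioctl(fd, FS_IOC_SETFSLABEL, label);
 */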
1144
1145static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label)
1146{
1147 char label[EXT4_LABEL_MAX + 1];
1148
1149 /*
1150 * EXT4_LABEL_MAX must always be smaller than FSLABEL_MAX because
1151 * FSLABEL_MAX must include the terminating null byte, while s_volume_name
1152 * does not have to.
1153 */
1154 BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);
1155
1156 lock_buffer(bh: sbi->s_sbh);
1157 memtostr_pad(label, sbi->s_es->s_volume_name);
1158 unlock_buffer(bh: sbi->s_sbh);
1159
1160 if (copy_to_user(to: user_label, from: label, n: sizeof(label)))
1161 return -EFAULT;
1162 return 0;
1163}
1164
1165static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
1166 struct fsuuid __user *ufsuuid)
1167{
1168 struct fsuuid fsuuid;
1169 __u8 uuid[UUID_SIZE];
1170
1171 if (copy_from_user(to: &fsuuid, from: ufsuuid, n: sizeof(fsuuid)))
1172 return -EFAULT;
1173
1174 if (fsuuid.fsu_len == 0) {
1175 fsuuid.fsu_len = UUID_SIZE;
1176 if (copy_to_user(to: &ufsuuid->fsu_len, from: &fsuuid.fsu_len,
1177 n: sizeof(fsuuid.fsu_len)))
1178 return -EFAULT;
1179 return 0;
1180 }
1181
1182 if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
1183 return -EINVAL;
1184
1185 lock_buffer(bh: sbi->s_sbh);
1186 memcpy(to: uuid, from: sbi->s_es->s_uuid, UUID_SIZE);
1187 unlock_buffer(bh: sbi->s_sbh);
1188
1189 fsuuid.fsu_len = UUID_SIZE;
1190 if (copy_to_user(to: ufsuuid, from: &fsuuid, n: sizeof(fsuuid)) ||
1191 copy_to_user(to: &ufsuuid->fsu_uuid[0], from: uuid, UUID_SIZE))
1192 return -EFAULT;
1193 return 0;
1194}
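
/*
 * The intended calling convention, as a sketch: a first call with
 * fsu_len == 0 just reports the UUID length, and a second call with a big
 * enough buffer returns the UUID itself (illustrative, error handling
 * omitted):
 *
 *	struct fsuuid *fsu = calloc(1, sizeof(*fsu) + 16);
 *
 *	ioctl(fd, EXT4_IOC_GETFSUUID, fsu);	// sets fsu->fsu_len to 16
 *	ioctl(fd, EXT4_IOC_GETFSUUID, fsu);	// fills fsu->fsu_uuid[]
 */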
1195
1196static int ext4_ioctl_setuuid(struct file *filp,
1197 const struct fsuuid __user *ufsuuid)
1198{
1199 int ret = 0;
1200 struct super_block *sb = file_inode(f: filp)->i_sb;
1201 struct fsuuid fsuuid;
1202 __u8 uuid[UUID_SIZE];
1203
1204 if (!capable(CAP_SYS_ADMIN))
1205 return -EPERM;
1206
1207 /*
1208 * If any checksums (group descriptors or metadata) are being used
1209 * then the checksum seed feature is required to change the UUID.
1210 */
1211 if (((ext4_has_feature_gdt_csum(sb) ||
1212 ext4_has_feature_metadata_csum(sb))
1213 && !ext4_has_feature_csum_seed(sb))
1214 || ext4_has_feature_stable_inodes(sb))
1215 return -EOPNOTSUPP;
1216
1217 if (copy_from_user(to: &fsuuid, from: ufsuuid, n: sizeof(fsuuid)))
1218 return -EFAULT;
1219
1220 if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
1221 return -EINVAL;
1222
1223 if (copy_from_user(to: uuid, from: &ufsuuid->fsu_uuid[0], UUID_SIZE))
1224 return -EFAULT;
1225
1226 ret = mnt_want_write_file(file: filp);
1227 if (ret)
1228 return ret;
1229
1230 ret = ext4_update_superblocks_fn(sb, func: ext4_sb_setuuid, arg: &uuid);
1231 mnt_drop_write_file(file: filp);
1232
1233 return ret;
1234}
1235
1236
1237#define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \
1238 EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \
1239 EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \
1240 EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \
1241 EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \
1242 EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \
1243 EXT4_TUNE_FL_RAID_STRIPE_WIDTH | EXT4_TUNE_FL_MOUNT_OPTS | \
1244 EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \
1245 EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \
1246 EXT4_TUNE_FL_ENCODING_FLAGS)
1247
1248#define EXT4_TUNE_SET_COMPAT_SUPP \
1249 (EXT4_FEATURE_COMPAT_DIR_INDEX | \
1250 EXT4_FEATURE_COMPAT_STABLE_INODES)
1251#define EXT4_TUNE_SET_INCOMPAT_SUPP \
1252 (EXT4_FEATURE_INCOMPAT_EXTENTS | \
1253 EXT4_FEATURE_INCOMPAT_EA_INODE | \
1254 EXT4_FEATURE_INCOMPAT_ENCRYPT | \
1255 EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
1256 EXT4_FEATURE_INCOMPAT_LARGEDIR | \
1257 EXT4_FEATURE_INCOMPAT_CASEFOLD)
1258#define EXT4_TUNE_SET_RO_COMPAT_SUPP \
1259 (EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \
1260 EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
1261 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
1262 EXT4_FEATURE_RO_COMPAT_PROJECT | \
1263 EXT4_FEATURE_RO_COMPAT_VERITY)
1264
1265#define EXT4_TUNE_CLEAR_COMPAT_SUPP (0)
1266#define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0)
1267#define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0)
1268
1269#define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \
1270 SB_ENC_NO_COMPAT_FALLBACK_FL)
1271
1272static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi,
1273 struct ext4_tune_sb_params __user *params)
1274{
1275 struct ext4_tune_sb_params ret;
1276 struct ext4_super_block *es = sbi->s_es;
1277
1278 memset(s: &ret, c: 0, n: sizeof(ret));
1279 ret.set_flags = TUNE_OPS_SUPPORTED;
1280 ret.errors_behavior = le16_to_cpu(es->s_errors);
1281 ret.mnt_count = le16_to_cpu(es->s_mnt_count);
1282 ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count);
1283 ret.checkinterval = le32_to_cpu(es->s_checkinterval);
1284 ret.last_check_time = le32_to_cpu(es->s_lastcheck);
1285 ret.reserved_blocks = ext4_r_blocks_count(es);
1286 ret.blocks_count = ext4_blocks_count(es);
1287 ret.reserved_uid = ext4_get_resuid(es);
1288 ret.reserved_gid = ext4_get_resgid(es);
1289 ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts);
1290 ret.def_hash_alg = es->s_def_hash_version;
1291 ret.raid_stride = le16_to_cpu(es->s_raid_stride);
1292 ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width);
1293 ret.encoding = le16_to_cpu(es->s_encoding);
1294 ret.encoding_flags = le16_to_cpu(es->s_encoding_flags);
1295 strscpy_pad(ret.mount_opts, es->s_mount_opts);
1296 ret.feature_compat = le32_to_cpu(es->s_feature_compat);
1297 ret.feature_incompat = le32_to_cpu(es->s_feature_incompat);
1298 ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat);
1299 ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP;
1300 ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP;
1301 ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP;
1302 ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP;
1303 ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP;
1304 ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP;
1305 if (copy_to_user(to: params, from: &ret, n: sizeof(ret)))
1306 return -EFAULT;
1307 return 0;
1308}
1309
1310static void ext4_sb_setparams(struct ext4_sb_info *sbi,
1311 struct ext4_super_block *es, const void *arg)
1312{
1313 const struct ext4_tune_sb_params *params = arg;
1314
1315 if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR)
1316 es->s_errors = cpu_to_le16(params->errors_behavior);
1317 if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT)
1318 es->s_mnt_count = cpu_to_le16(params->mnt_count);
1319 if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT)
1320 es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count);
1321 if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL)
1322 es->s_checkinterval = cpu_to_le32(params->checkinterval);
1323 if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME)
1324 es->s_lastcheck = cpu_to_le32(params->last_check_time);
1325 if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) {
1326 ext4_fsblk_t blk = params->reserved_blocks;
1327
1328 es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
1329 es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
1330 }
1331 if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) {
1332 int uid = params->reserved_uid;
1333
1334 es->s_def_resuid = cpu_to_le16(uid & 0xFFFF);
1335 es->s_def_resuid_hi = cpu_to_le16(uid >> 16);
1336 }
1337 if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) {
1338 int gid = params->reserved_gid;
1339
1340 es->s_def_resgid = cpu_to_le16(gid & 0xFFFF);
1341 es->s_def_resgid_hi = cpu_to_le16(gid >> 16);
1342 }
1343 if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS)
1344 es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts);
1345 if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
1346 es->s_def_hash_version = params->def_hash_alg;
1347 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE)
1348 es->s_raid_stride = cpu_to_le16(params->raid_stride);
1349 if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH)
1350 es->s_raid_stripe_width =
1351 cpu_to_le32(params->raid_stripe_width);
1352 if (params->set_flags & EXT4_TUNE_FL_ENCODING)
1353 es->s_encoding = cpu_to_le16(params->encoding);
1354 if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)
1355 es->s_encoding_flags = cpu_to_le16(params->encoding_flags);
1356 strscpy_pad(es->s_mount_opts, params->mount_opts);
1357 if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
1358 es->s_feature_compat |=
1359 cpu_to_le32(params->set_feature_compat_mask);
1360 es->s_feature_incompat |=
1361 cpu_to_le32(params->set_feature_incompat_mask);
1362 es->s_feature_ro_compat |=
1363 cpu_to_le32(params->set_feature_ro_compat_mask);
1364 es->s_feature_compat &=
1365 ~cpu_to_le32(params->clear_feature_compat_mask);
1366 es->s_feature_incompat &=
1367 ~cpu_to_le32(params->clear_feature_incompat_mask);
1368 es->s_feature_ro_compat &=
1369 ~cpu_to_le32(params->clear_feature_ro_compat_mask);
1370 if (params->set_feature_compat_mask &
1371 EXT4_FEATURE_COMPAT_DIR_INDEX)
1372 es->s_def_hash_version = sbi->s_def_hash_version;
1373 if (params->set_feature_incompat_mask &
1374 EXT4_FEATURE_INCOMPAT_CSUM_SEED)
1375 es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed);
1376 }
1377 if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK)
1378 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
1379}
1380
1381static int ext4_ioctl_set_tune_sb(struct file *filp,
1382 struct ext4_tune_sb_params __user *in)
1383{
1384 struct ext4_tune_sb_params params;
1385 struct super_block *sb = file_inode(f: filp)->i_sb;
1386 struct ext4_sb_info *sbi = EXT4_SB(sb);
1387 struct ext4_super_block *es = sbi->s_es;
1388 int enabling_casefold = 0;
1389 int ret;
1390
1391 if (!capable(CAP_SYS_ADMIN))
1392 return -EPERM;
1393
1394 if (copy_from_user(to: &params, from: in, n: sizeof(params)))
1395 return -EFAULT;
1396
1397 if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0)
1398 return -EOPNOTSUPP;
1399
1400 if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) &&
1401 (params.errors_behavior > EXT4_ERRORS_PANIC))
1402 return -EINVAL;
1403
1404 if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) &&
1405 (params.reserved_blocks > ext4_blocks_count(es: sbi->s_es) / 2))
1406 return -EINVAL;
1407 if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) &&
1408 ((params.def_hash_alg > DX_HASH_LAST) ||
1409 (params.def_hash_alg == DX_HASH_SIPHASH)))
1410 return -EINVAL;
1411 if ((params.set_flags & EXT4_TUNE_FL_FEATURES) &&
1412 (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES))
1413 return -EINVAL;
1414
1415 if (params.set_flags & EXT4_TUNE_FL_FEATURES) {
1416 params.set_feature_compat_mask =
1417 params.feature_compat &
1418 ~le32_to_cpu(es->s_feature_compat);
1419 params.set_feature_incompat_mask =
1420 params.feature_incompat &
1421 ~le32_to_cpu(es->s_feature_incompat);
1422 params.set_feature_ro_compat_mask =
1423 params.feature_ro_compat &
1424 ~le32_to_cpu(es->s_feature_ro_compat);
1425 params.clear_feature_compat_mask =
1426 ~params.feature_compat &
1427 le32_to_cpu(es->s_feature_compat);
1428 params.clear_feature_incompat_mask =
1429 ~params.feature_incompat &
1430 le32_to_cpu(es->s_feature_incompat);
1431 params.clear_feature_ro_compat_mask =
1432 ~params.feature_ro_compat &
1433 le32_to_cpu(es->s_feature_ro_compat);
1434 params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES;
1435 }
1436 if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) {
1437 if ((params.set_feature_compat_mask &
1438 ~EXT4_TUNE_SET_COMPAT_SUPP) ||
1439 (params.set_feature_incompat_mask &
1440 ~EXT4_TUNE_SET_INCOMPAT_SUPP) ||
1441 (params.set_feature_ro_compat_mask &
1442 ~EXT4_TUNE_SET_RO_COMPAT_SUPP) ||
1443 (params.clear_feature_compat_mask &
1444 ~EXT4_TUNE_CLEAR_COMPAT_SUPP) ||
1445 (params.clear_feature_incompat_mask &
1446 ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) ||
1447 (params.clear_feature_ro_compat_mask &
1448 ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP))
1449 return -EOPNOTSUPP;
1450
1451 /*
1452 * Filter out the features that are already set from
1453 * the set_mask.
1454 */
1455 params.set_feature_compat_mask &=
1456 ~le32_to_cpu(es->s_feature_compat);
1457 params.set_feature_incompat_mask &=
1458 ~le32_to_cpu(es->s_feature_incompat);
1459 params.set_feature_ro_compat_mask &=
1460 ~le32_to_cpu(es->s_feature_ro_compat);
1461 if ((params.set_feature_incompat_mask &
1462 EXT4_FEATURE_INCOMPAT_CASEFOLD)) {
1463 enabling_casefold = 1;
1464 if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) {
1465 params.encoding = EXT4_ENC_UTF8_12_1;
1466 params.set_flags |= EXT4_TUNE_FL_ENCODING;
1467 }
1468 if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) {
1469 params.encoding_flags = 0;
1470 params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS;
1471 }
1472 }
1473 if ((params.set_feature_compat_mask &
1474 EXT4_FEATURE_COMPAT_DIR_INDEX)) {
1475 uuid_t uu;
1476
1477 memcpy(to: &uu, from: sbi->s_hash_seed, UUID_SIZE);
1478 if (uuid_is_null(uuid: &uu))
1479 generate_random_uuid(uuid: (char *)
1480 &sbi->s_hash_seed);
1481 if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
1482 sbi->s_def_hash_version = params.def_hash_alg;
1483 else if (sbi->s_def_hash_version == 0)
1484 sbi->s_def_hash_version = DX_HASH_HALF_MD4;
1485 if (!(es->s_flags &
1486 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) &&
1487 !(es->s_flags &
1488 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) {
1489#ifdef __CHAR_UNSIGNED__
1490 sbi->s_hash_unsigned = 3;
1491#else
1492 sbi->s_hash_unsigned = 0;
1493#endif
1494 }
1495 }
1496 }
1497 if (params.set_flags & EXT4_TUNE_FL_ENCODING) {
1498 if (!enabling_casefold)
1499 return -EINVAL;
1500 if (params.encoding == 0)
1501 params.encoding = EXT4_ENC_UTF8_12_1;
1502 else if (params.encoding != EXT4_ENC_UTF8_12_1)
1503 return -EINVAL;
1504 }
1505 if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) {
1506 if (!enabling_casefold)
1507 return -EINVAL;
1508 if (params.encoding_flags & ~SB_ENC_SUPP_MASK)
1509 return -EINVAL;
1510 }
1511
1512 ret = mnt_want_write_file(file: filp);
1513 if (ret)
1514 return ret;
1515
1516 ret = ext4_update_superblocks_fn(sb, func: ext4_sb_setparams, arg: &params);
1517 mnt_drop_write_file(file: filp);
1518
1519 if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG)
1520 sbi->s_def_hash_version = params.def_hash_alg;
1521
1522 return ret;
1523}
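
/*
 * Hypothetical user-space sketch of the tune interface above: raise the
 * maximum mount count between forced checks to 50 (field and flag names as
 * used in this file; error handling omitted):
 *
 *	struct ext4_tune_sb_params p = { 0 };
 *
 *	p.set_flags = EXT4_TUNE_FL_MAX_MNT_COUNT;
 *	p.max_mnt_count = 50;
 *	ioctl(fd, EXT4_IOC_SET_TUNE_SB_PARAM, &p);
 */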
1524
1525static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1526{
1527 struct inode *inode = file_inode(f: filp);
1528 struct super_block *sb = inode->i_sb;
1529 struct mnt_idmap *idmap = file_mnt_idmap(file: filp);
1530
1531 ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
1532
1533 switch (cmd) {
1534 case FS_IOC_GETFSMAP:
1535 return ext4_ioc_getfsmap(sb, arg: (void __user *)arg);
1536 case EXT4_IOC_GETVERSION:
1537 case EXT4_IOC_GETVERSION_OLD:
1538 return put_user(inode->i_generation, (int __user *) arg);
1539 case EXT4_IOC_SETVERSION:
1540 case EXT4_IOC_SETVERSION_OLD: {
1541 handle_t *handle;
1542 struct ext4_iloc iloc;
1543 __u32 generation;
1544 int err;
1545
1546 if (!inode_owner_or_capable(idmap, inode))
1547 return -EPERM;
1548
1549 if (ext4_has_feature_metadata_csum(sb: inode->i_sb)) {
1550 ext4_warning(sb, "Setting inode version is not "
1551 "supported with metadata_csum enabled.");
1552 return -ENOTTY;
1553 }
1554
1555 err = mnt_want_write_file(file: filp);
1556 if (err)
1557 return err;
1558 if (get_user(generation, (int __user *) arg)) {
1559 err = -EFAULT;
1560 goto setversion_out;
1561 }
1562
1563 inode_lock(inode);
1564 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
1565 if (IS_ERR(ptr: handle)) {
1566 err = PTR_ERR(ptr: handle);
1567 goto unlock_out;
1568 }
1569 err = ext4_reserve_inode_write(handle, inode, iloc: &iloc);
1570 if (err == 0) {
1571 inode_set_ctime_current(inode);
1572 inode_inc_iversion(inode);
1573 inode->i_generation = generation;
1574 err = ext4_mark_iloc_dirty(handle, inode, iloc: &iloc);
1575 }
1576 ext4_journal_stop(handle);
1577
1578unlock_out:
1579 inode_unlock(inode);
1580setversion_out:
1581 mnt_drop_write_file(file: filp);
1582 return err;
1583 }
1584 case EXT4_IOC_GROUP_EXTEND: {
1585 ext4_fsblk_t n_blocks_count;
1586 int err, err2=0;
1587
1588 err = ext4_resize_begin(sb);
1589 if (err)
1590 return err;
1591
1592 if (get_user(n_blocks_count, (__u32 __user *)arg)) {
1593 err = -EFAULT;
1594 goto group_extend_out;
1595 }
1596
1597 if (ext4_has_feature_bigalloc(sb)) {
1598 ext4_msg(sb, KERN_ERR,
1599 "Online resizing not supported with bigalloc");
1600 err = -EOPNOTSUPP;
1601 goto group_extend_out;
1602 }
1603
1604 err = mnt_want_write_file(file: filp);
1605 if (err)
1606 goto group_extend_out;
1607
1608 err = ext4_group_extend(sb, es: EXT4_SB(sb)->s_es, n_blocks_count);
1609 if (EXT4_SB(sb)->s_journal) {
1610 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1611 err2 = jbd2_journal_flush(journal: EXT4_SB(sb)->s_journal, flags: 0);
1612 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1613 }
1614 if (err == 0)
1615 err = err2;
1616 mnt_drop_write_file(file: filp);
1617group_extend_out:
1618 err2 = ext4_resize_end(sb, update_backups: false);
1619 if (err == 0)
1620 err = err2;
1621 return err;
1622 }
1623
1624 case EXT4_IOC_MOVE_EXT: {
1625 struct move_extent me;
1626 int err;
1627
1628 if (!(filp->f_mode & FMODE_READ) ||
1629 !(filp->f_mode & FMODE_WRITE))
1630 return -EBADF;
1631
1632 if (copy_from_user(to: &me,
1633 from: (struct move_extent __user *)arg, n: sizeof(me)))
1634 return -EFAULT;
1635 me.moved_len = 0;
1636
1637 CLASS(fd, donor)(fd: me.donor_fd);
1638 if (fd_empty(f: donor))
1639 return -EBADF;
1640
1641 if (!(fd_file(donor)->f_mode & FMODE_WRITE))
1642 return -EBADF;
1643
1644 if (ext4_has_feature_bigalloc(sb)) {
1645 ext4_msg(sb, KERN_ERR,
1646 "Online defrag not supported with bigalloc");
1647 return -EOPNOTSUPP;
1648 } else if (IS_DAX(inode)) {
1649 ext4_msg(sb, KERN_ERR,
1650 "Online defrag not supported with DAX");
1651 return -EOPNOTSUPP;
1652 }
1653
1654 err = mnt_want_write_file(file: filp);
1655 if (err)
1656 return err;
1657
1658 err = ext4_move_extents(o_filp: filp, fd_file(donor), start_orig: me.orig_start,
1659 start_donor: me.donor_start, len: me.len, moved_len: &me.moved_len);
1660 mnt_drop_write_file(file: filp);
1661
1662 if (copy_to_user(to: (struct move_extent __user *)arg,
1663 from: &me, n: sizeof(me)))
1664 err = -EFAULT;
1665 return err;
1666 }
1667
1668 case EXT4_IOC_GROUP_ADD: {
1669 struct ext4_new_group_data input;
1670
1671 if (copy_from_user(to: &input, from: (struct ext4_new_group_input __user *)arg,
1672 n: sizeof(input)))
1673 return -EFAULT;
1674
1675 return ext4_ioctl_group_add(file: filp, input: &input);
1676 }
1677
1678 case EXT4_IOC_MIGRATE:
1679 {
1680 int err;
1681 if (!inode_owner_or_capable(idmap, inode))
1682 return -EACCES;
1683
1684 err = mnt_want_write_file(file: filp);
1685 if (err)
1686 return err;
1687 /*
1688 * The inode lock prevents writes and truncates on the file;
1689 * reads still go through. We take i_data_sem in
1690 * ext4_ext_swap_inode_data before we switch the
1691 * inode format, to block reads during the conversion.
1692 */
1693 inode_lock((inode));
1694 err = ext4_ext_migrate(inode);
1695 inode_unlock((inode));
1696 mnt_drop_write_file(file: filp);
1697 return err;
1698 }
1699
1700 case EXT4_IOC_ALLOC_DA_BLKS:
1701 {
1702 int err;
1703 if (!inode_owner_or_capable(idmap, inode))
1704 return -EACCES;
1705
1706 err = mnt_want_write_file(file: filp);
1707 if (err)
1708 return err;
1709 err = ext4_alloc_da_blocks(inode);
1710 mnt_drop_write_file(file: filp);
1711 return err;
1712 }
1713
1714 case EXT4_IOC_SWAP_BOOT:
1715 {
1716 int err;
1717 if (!(filp->f_mode & FMODE_WRITE))
1718 return -EBADF;
1719 err = mnt_want_write_file(file: filp);
1720 if (err)
1721 return err;
1722 err = swap_inode_boot_loader(sb, idmap, inode);
1723 mnt_drop_write_file(file: filp);
1724 return err;
1725 }
1726
1727 case EXT4_IOC_RESIZE_FS: {
1728 ext4_fsblk_t n_blocks_count;
1729 int err = 0, err2 = 0;
1730 ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;
1731
1732 if (copy_from_user(to: &n_blocks_count, from: (__u64 __user *)arg,
1733 n: sizeof(__u64))) {
1734 return -EFAULT;
1735 }
1736
1737 err = ext4_resize_begin(sb);
1738 if (err)
1739 return err;
1740
1741 err = mnt_want_write_file(file: filp);
1742 if (err)
1743 goto resizefs_out;
1744
1745 err = ext4_resize_fs(sb, n_blocks_count);
1746 if (EXT4_SB(sb)->s_journal) {
1747 ext4_fc_mark_ineligible(sb, reason: EXT4_FC_REASON_RESIZE, NULL);
1748 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
1749 err2 = jbd2_journal_flush(journal: EXT4_SB(sb)->s_journal, flags: 0);
1750 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
1751 }
1752 if (err == 0)
1753 err = err2;
1754 mnt_drop_write_file(file: filp);
1755 if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
1756 ext4_has_group_desc_csum(sb) &&
1757 test_opt(sb, INIT_INODE_TABLE))
1758 err = ext4_register_li_request(sb, first_not_zeroed: o_group);
1759
1760resizefs_out:
1761 err2 = ext4_resize_end(sb, update_backups: true);
1762 if (err == 0)
1763 err = err2;
1764 return err;
1765 }
1766
1767 case FITRIM:
1768 {
1769 struct fstrim_range range;
1770 int ret = 0;
1771
1772 if (!capable(CAP_SYS_ADMIN))
1773 return -EPERM;
1774
1775 if (!bdev_max_discard_sectors(bdev: sb->s_bdev))
1776 return -EOPNOTSUPP;
1777
1778 /*
1779 * We haven't replayed the journal, so we cannot use our
1780 * block-bitmap-guided storage zapping commands.
1781 */
1782 if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
1783 return -EROFS;
1784
1785 if (copy_from_user(to: &range, from: (struct fstrim_range __user *)arg,
1786 n: sizeof(range)))
1787 return -EFAULT;
1788
1789 ret = ext4_trim_fs(sb, &range);
1790 if (ret < 0)
1791 return ret;
1792
1793 if (copy_to_user(to: (struct fstrim_range __user *)arg, from: &range,
1794 n: sizeof(range)))
1795 return -EFAULT;
1796
1797 return 0;
1798 }
1799 case EXT4_IOC_PRECACHE_EXTENTS:
1800 {
1801 int ret;
1802
1803 inode_lock_shared(inode);
1804 ret = ext4_ext_precache(inode);
1805 inode_unlock_shared(inode);
1806 return ret;
1807 }
1808 case FS_IOC_SET_ENCRYPTION_POLICY:
1809 if (!ext4_has_feature_encrypt(sb))
1810 return -EOPNOTSUPP;
1811 return fscrypt_ioctl_set_policy(filp, arg: (const void __user *)arg);
1812
1813 case FS_IOC_GET_ENCRYPTION_PWSALT:
1814 return ext4_ioctl_get_encryption_pwsalt(filp, arg: (void __user *)arg);
1815
1816 case FS_IOC_GET_ENCRYPTION_POLICY:
1817 if (!ext4_has_feature_encrypt(sb))
1818 return -EOPNOTSUPP;
1819 return fscrypt_ioctl_get_policy(filp, arg: (void __user *)arg);
1820
1821 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
1822 if (!ext4_has_feature_encrypt(sb))
1823 return -EOPNOTSUPP;
1824 return fscrypt_ioctl_get_policy_ex(filp, arg: (void __user *)arg);
1825
1826 case FS_IOC_ADD_ENCRYPTION_KEY:
1827 if (!ext4_has_feature_encrypt(sb))
1828 return -EOPNOTSUPP;
1829 return fscrypt_ioctl_add_key(filp, arg: (void __user *)arg);
1830
1831 case FS_IOC_REMOVE_ENCRYPTION_KEY:
1832 if (!ext4_has_feature_encrypt(sb))
1833 return -EOPNOTSUPP;
1834 return fscrypt_ioctl_remove_key(filp, arg: (void __user *)arg);
1835
1836 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
1837 if (!ext4_has_feature_encrypt(sb))
1838 return -EOPNOTSUPP;
1839 return fscrypt_ioctl_remove_key_all_users(filp,
1840 arg: (void __user *)arg);
1841 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
1842 if (!ext4_has_feature_encrypt(sb))
1843 return -EOPNOTSUPP;
1844 return fscrypt_ioctl_get_key_status(filp, arg: (void __user *)arg);
1845
1846 case FS_IOC_GET_ENCRYPTION_NONCE:
1847 if (!ext4_has_feature_encrypt(sb))
1848 return -EOPNOTSUPP;
1849 return fscrypt_ioctl_get_nonce(filp, arg: (void __user *)arg);
1850
1851 case EXT4_IOC_CLEAR_ES_CACHE:
1852 {
1853 if (!inode_owner_or_capable(idmap, inode))
1854 return -EACCES;
1855 ext4_clear_inode_es(inode);
1856 return 0;
1857 }
1858
1859 case EXT4_IOC_GETSTATE:
1860 {
1861 __u32 state = 0;
1862
1863 if (ext4_test_inode_state(inode, bit: EXT4_STATE_EXT_PRECACHED))
1864 state |= EXT4_STATE_FLAG_EXT_PRECACHED;
1865 if (ext4_test_inode_state(inode, bit: EXT4_STATE_NEW))
1866 state |= EXT4_STATE_FLAG_NEW;
1867 if (ext4_test_inode_state(inode, bit: EXT4_STATE_NEWENTRY))
1868 state |= EXT4_STATE_FLAG_NEWENTRY;
1869 if (ext4_test_inode_state(inode, bit: EXT4_STATE_DA_ALLOC_CLOSE))
1870 state |= EXT4_STATE_FLAG_DA_ALLOC_CLOSE;
1871
1872 return put_user(state, (__u32 __user *) arg);
1873 }
1874
1875 case EXT4_IOC_GET_ES_CACHE:
1876 return ext4_ioctl_get_es_cache(filp, arg);
1877
1878 case EXT4_IOC_SHUTDOWN:
1879 return ext4_ioctl_shutdown(sb, arg);
1880
1881 case FS_IOC_ENABLE_VERITY:
1882 if (!ext4_has_feature_verity(sb))
1883 return -EOPNOTSUPP;
1884 return fsverity_ioctl_enable(filp, arg: (const void __user *)arg);
1885
1886 case FS_IOC_MEASURE_VERITY:
1887 if (!ext4_has_feature_verity(sb))
1888 return -EOPNOTSUPP;
1889 return fsverity_ioctl_measure(filp, arg: (void __user *)arg);
1890
1891 case FS_IOC_READ_VERITY_METADATA:
1892 if (!ext4_has_feature_verity(sb))
1893 return -EOPNOTSUPP;
1894 return fsverity_ioctl_read_metadata(filp,
1895 uarg: (const void __user *)arg);
1896
1897 case EXT4_IOC_CHECKPOINT:
1898 return ext4_ioctl_checkpoint(filp, arg);
1899
1900 case FS_IOC_GETFSLABEL:
1901 return ext4_ioctl_getlabel(sbi: EXT4_SB(sb), user_label: (void __user *)arg);
1902
1903 case FS_IOC_SETFSLABEL:
1904 return ext4_ioctl_setlabel(filp,
1905 user_label: (const void __user *)arg);
1906
1907 case EXT4_IOC_GETFSUUID:
1908 return ext4_ioctl_getuuid(sbi: EXT4_SB(sb), ufsuuid: (void __user *)arg);
1909 case EXT4_IOC_SETFSUUID:
1910 return ext4_ioctl_setuuid(filp, ufsuuid: (const void __user *)arg);
1911 case EXT4_IOC_GET_TUNE_SB_PARAM:
1912 return ext4_ioctl_get_tune_sb(sbi: EXT4_SB(sb),
1913 params: (void __user *)arg);
1914 case EXT4_IOC_SET_TUNE_SB_PARAM:
1915 return ext4_ioctl_set_tune_sb(filp, in: (void __user *)arg);
1916 default:
1917 return -ENOTTY;
1918 }
1919}
1920
1921long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1922{
1923 return __ext4_ioctl(filp, cmd, arg);
1924}
1925
1926#ifdef CONFIG_COMPAT
1927long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1928{
1929 /* These are just misnamed; they actually get/put an int from/to user space. */
1930 switch (cmd) {
1931 case EXT4_IOC32_GETVERSION:
1932 cmd = EXT4_IOC_GETVERSION;
1933 break;
1934 case EXT4_IOC32_SETVERSION:
1935 cmd = EXT4_IOC_SETVERSION;
1936 break;
1937 case EXT4_IOC32_GROUP_EXTEND:
1938 cmd = EXT4_IOC_GROUP_EXTEND;
1939 break;
1940 case EXT4_IOC32_GETVERSION_OLD:
1941 cmd = EXT4_IOC_GETVERSION_OLD;
1942 break;
1943 case EXT4_IOC32_SETVERSION_OLD:
1944 cmd = EXT4_IOC_SETVERSION_OLD;
1945 break;
1946 case EXT4_IOC32_GETRSVSZ:
1947 cmd = EXT4_IOC_GETRSVSZ;
1948 break;
1949 case EXT4_IOC32_SETRSVSZ:
1950 cmd = EXT4_IOC_SETRSVSZ;
1951 break;
1952 case EXT4_IOC32_GROUP_ADD: {
1953 struct compat_ext4_new_group_input __user *uinput;
1954 struct ext4_new_group_data input;
1955 int err;
1956
1957 uinput = compat_ptr(uptr: arg);
1958 err = get_user(input.group, &uinput->group);
1959 err |= get_user(input.block_bitmap, &uinput->block_bitmap);
1960 err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
1961 err |= get_user(input.inode_table, &uinput->inode_table);
1962 err |= get_user(input.blocks_count, &uinput->blocks_count);
1963 err |= get_user(input.reserved_blocks,
1964 &uinput->reserved_blocks);
1965 if (err)
1966 return -EFAULT;
1967 return ext4_ioctl_group_add(file, input: &input);
1968 }
1969 case EXT4_IOC_MOVE_EXT:
1970 case EXT4_IOC_RESIZE_FS:
1971 case FITRIM:
1972 case EXT4_IOC_PRECACHE_EXTENTS:
1973 case FS_IOC_SET_ENCRYPTION_POLICY:
1974 case FS_IOC_GET_ENCRYPTION_PWSALT:
1975 case FS_IOC_GET_ENCRYPTION_POLICY:
1976 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
1977 case FS_IOC_ADD_ENCRYPTION_KEY:
1978 case FS_IOC_REMOVE_ENCRYPTION_KEY:
1979 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
1980 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
1981 case FS_IOC_GET_ENCRYPTION_NONCE:
1982 case EXT4_IOC_SHUTDOWN:
1983 case FS_IOC_GETFSMAP:
1984 case FS_IOC_ENABLE_VERITY:
1985 case FS_IOC_MEASURE_VERITY:
1986 case FS_IOC_READ_VERITY_METADATA:
1987 case EXT4_IOC_CLEAR_ES_CACHE:
1988 case EXT4_IOC_GETSTATE:
1989 case EXT4_IOC_GET_ES_CACHE:
1990 case EXT4_IOC_CHECKPOINT:
1991 case FS_IOC_GETFSLABEL:
1992 case FS_IOC_SETFSLABEL:
1993 case EXT4_IOC_GETFSUUID:
1994 case EXT4_IOC_SETFSUUID:
1995 break;
1996 default:
1997 return -ENOIOCTLCMD;
1998 }
1999 return ext4_ioctl(filp: file, cmd, arg: (unsigned long) compat_ptr(uptr: arg));
2000}
2001#endif
2002
2003static void set_overhead(struct ext4_sb_info *sbi,
2004 struct ext4_super_block *es, const void *arg)
2005{
2006 es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
2007}
2008
2009int ext4_update_overhead(struct super_block *sb, bool force)
2010{
2011 struct ext4_sb_info *sbi = EXT4_SB(sb);
2012
2013 if (ext4_emergency_state(sb) || sb_rdonly(sb))
2014 return 0;
2015 if (!force &&
2016 (sbi->s_overhead == 0 ||
2017 sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters)))
2018 return 0;
2019 return ext4_update_superblocks_fn(sb, func: set_overhead, arg: &sbi->s_overhead);
2020}
2021