Skip to content

Commit 7e619bc

Browse files
Abhijith Das authored and swhiteho committed
GFS2: Fix writing to non-page aligned gfs2_quota structures
This is the upstream fix for this bug. This patch differs from the RHEL5 fix (Red Hat bz #555754) which simply writes to the 8-byte value field of the quota. In upstream quota code, we're required to write the entire quota (88 bytes) which can be split across a page boundary. We check for such quotas, and read/write the two parts from/to the corresponding pages holding these parts. With this patch, I don't see the bug anymore using the reproducer in Red Hat bz 555754. I successfully ran a couple of simple tests/mounts/ umounts and it doesn't seem like this patch breaks anything else. Signed-off-by: Abhi Das <[email protected]> Signed-off-by: Steven Whitehouse <[email protected]>
1 parent 913a71d commit 7e619bc

File tree

1 file changed

+61
-25
lines changed

1 file changed

+61
-25
lines changed

fs/gfs2/quota.c

Lines changed: 61 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -637,15 +637,40 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
637637
unsigned blocksize, iblock, pos;
638638
struct buffer_head *bh, *dibh;
639639
struct page *page;
640-
void *kaddr;
641-
struct gfs2_quota *qp;
642-
s64 value;
643-
int err = -EIO;
640+
void *kaddr, *ptr;
641+
struct gfs2_quota q, *qp;
642+
int err, nbytes;
644643
u64 size;
645644

646645
if (gfs2_is_stuffed(ip))
647646
gfs2_unstuff_dinode(ip, NULL);
648-
647+
648+
memset(&q, 0, sizeof(struct gfs2_quota));
649+
err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
650+
if (err < 0)
651+
return err;
652+
653+
err = -EIO;
654+
qp = &q;
655+
qp->qu_value = be64_to_cpu(qp->qu_value);
656+
qp->qu_value += change;
657+
qp->qu_value = cpu_to_be64(qp->qu_value);
658+
qd->qd_qb.qb_value = qp->qu_value;
659+
if (fdq) {
660+
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
661+
qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
662+
qd->qd_qb.qb_warn = qp->qu_warn;
663+
}
664+
if (fdq->d_fieldmask & FS_DQ_BHARD) {
665+
qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
666+
qd->qd_qb.qb_limit = qp->qu_limit;
667+
}
668+
}
669+
670+
/* Write the quota into the quota file on disk */
671+
ptr = qp;
672+
nbytes = sizeof(struct gfs2_quota);
673+
get_a_page:
649674
page = grab_cache_page(mapping, index);
650675
if (!page)
651676
return -ENOMEM;
@@ -667,7 +692,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
667692
if (!buffer_mapped(bh)) {
668693
gfs2_block_map(inode, iblock, bh, 1);
669694
if (!buffer_mapped(bh))
670-
goto unlock;
695+
goto unlock_out;
696+
/* If it's a newly allocated disk block for quota, zero it */
697+
if (buffer_new(bh)) {
698+
memset(bh->b_data, 0, bh->b_size);
699+
set_buffer_uptodate(bh);
700+
}
671701
}
672702

673703
if (PageUptodate(page))
@@ -677,32 +707,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
677707
ll_rw_block(READ_META, 1, &bh);
678708
wait_on_buffer(bh);
679709
if (!buffer_uptodate(bh))
680-
goto unlock;
710+
goto unlock_out;
681711
}
682712

683713
gfs2_trans_add_bh(ip->i_gl, bh, 0);
684714

685715
kaddr = kmap_atomic(page, KM_USER0);
686-
qp = kaddr + offset;
687-
value = (s64)be64_to_cpu(qp->qu_value) + change;
688-
qp->qu_value = cpu_to_be64(value);
689-
qd->qd_qb.qb_value = qp->qu_value;
690-
if (fdq) {
691-
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
692-
qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
693-
qd->qd_qb.qb_warn = qp->qu_warn;
694-
}
695-
if (fdq->d_fieldmask & FS_DQ_BHARD) {
696-
qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
697-
qd->qd_qb.qb_limit = qp->qu_limit;
698-
}
699-
}
716+
if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
717+
nbytes = PAGE_CACHE_SIZE - offset;
718+
memcpy(kaddr + offset, ptr, nbytes);
700719
flush_dcache_page(page);
701720
kunmap_atomic(kaddr, KM_USER0);
721+
unlock_page(page);
722+
page_cache_release(page);
723+
724+
/* If quota straddles page boundary, we need to update the rest of the
725+
* quota at the beginning of the next page */
726+
if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
727+
ptr = ptr + nbytes;
728+
nbytes = sizeof(struct gfs2_quota) - nbytes;
729+
offset = 0;
730+
index++;
731+
goto get_a_page;
732+
}
702733

734+
/* Update the disk inode timestamp and size (if extended) */
703735
err = gfs2_meta_inode_buffer(ip, &dibh);
704736
if (err)
705-
goto unlock;
737+
goto out;
706738

707739
size = loc + sizeof(struct gfs2_quota);
708740
if (size > inode->i_size) {
@@ -715,7 +747,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
715747
brelse(dibh);
716748
mark_inode_dirty(inode);
717749

718-
unlock:
750+
out:
751+
return err;
752+
unlock_out:
719753
unlock_page(page);
720754
page_cache_release(page);
721755
return err;
@@ -779,8 +813,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
779813
* rgrp since it won't be allocated during the transaction
780814
*/
781815
al->al_requested = 1;
782-
/* +1 in the end for block requested above for unstuffing */
783-
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
816+
/* +3 in the end for unstuffing block, inode size update block
817+
* and another block in case quota straddles page boundary and
818+
* two blocks need to be updated instead of 1 */
819+
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
784820

785821
if (nalloc)
786822
al->al_requested += nalloc * (data_blocks + ind_blocks);

0 commit comments

Comments (0)