[PATCH] fs/ntfs: Conversion to generic boolean
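Background for the hunks below: NTFS carried its own driver-local boolean (BOOL with TRUE/FALSE) from before the kernel exposed the C99 type; this patch switches every use in file.c to the generic bool/true/false provided kernel-wide via <linux/types.h> and <linux/stddef.h>. A minimal userspace sketch of why the generic type is preferable (the enum BOOL here is illustrative of a driver-local definition, not a verbatim copy of the NTFS header):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a driver-local boolean; any int-backed
 * typedef behaves the same way. */
typedef enum { FALSE = 0, TRUE = 1 } BOOL;

int main(void)
{
	BOOL old_style = (BOOL)(1 << 8);  /* typically keeps the raw value, 256 */
	bool new_style = (bool)(1 << 8);  /* _Bool normalizes any nonzero to 1 */

	/* Commonly prints "old: 256, new: 1": only _Bool guarantees that
	 * every truth value compares equal to true. */
	printf("old: %d, new: %d\n", (int)old_style, (int)new_style);
	return 0;
}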
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dcae12310d123c13642809219c5cb51126..585a79d39c9d0be6216b3a1f5385bbea876aef34 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
        u32 attr_rec_len = 0;
        unsigned blocksize, u;
        int err, mp_size;
-       BOOL rl_write_locked, was_hole, is_retry;
+       bool rl_write_locked, was_hole, is_retry;
        unsigned char blocksize_bits;
        struct {
                u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                return -ENOMEM;
                }
        } while (++u < nr_pages);
-       rl_write_locked = FALSE;
+       rl_write_locked = false;
        rl = NULL;
        err = 0;
        vcn = lcn = -1;
        vcn_len = 0;
        lcn_block = -1;
-       was_hole = FALSE;
+       was_hole = false;
        cpos = pos >> vol->cluster_size_bits;
        end = pos + bytes;
        cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ map_buffer_cached:
                        }
                        continue;
                }
-               is_retry = FALSE;
+               is_retry = false;
                if (!rl) {
                        down_read(&ni->runlist.lock);
 retry_remap:
@@ -776,7 +776,7 @@ retry_remap:
                                 * Successful remap, setup the map cache and
                                 * use that to deal with the buffer.
                                 */
-                               was_hole = FALSE;
+                               was_hole = false;
                                vcn = bh_cpos;
                                vcn_len = rl[1].vcn - vcn;
                                lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ retry_remap:
                                if (likely(vcn + vcn_len >= cend)) {
                                        if (rl_write_locked) {
                                                up_write(&ni->runlist.lock);
-                                               rl_write_locked = FALSE;
+                                               rl_write_locked = false;
                                        } else
                                                up_read(&ni->runlist.lock);
                                        rl = NULL;
@@ -818,13 +818,13 @@ retry_remap:
                                         */
                                        up_read(&ni->runlist.lock);
                                        down_write(&ni->runlist.lock);
-                                       rl_write_locked = TRUE;
+                                       rl_write_locked = true;
                                        goto retry_remap;
                                }
                                err = ntfs_map_runlist_nolock(ni, bh_cpos,
                                                NULL);
                                if (likely(!err)) {
-                                       is_retry = TRUE;
+                                       is_retry = true;
                                        goto retry_remap;
                                }
                                /*
@@ -903,7 +903,7 @@ rl_not_mapped_enoent:
                if (!rl_write_locked) {
                        up_read(&ni->runlist.lock);
                        down_write(&ni->runlist.lock);
-                       rl_write_locked = TRUE;
+                       rl_write_locked = true;
                        goto retry_remap;
                }
                /* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ rl_not_mapped_enoent:
                        }
                }
                rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-                               FALSE);
+                               false);
                if (IS_ERR(rl2)) {
                        err = PTR_ERR(rl2);
                        ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ rl_not_mapped_enoent:
                status.mft_attr_mapped = 0;
                status.mp_rebuilt = 0;
                /* Setup the map cache and use that to deal with the buffer. */
-               was_hole = TRUE;
+               was_hole = true;
                vcn = bh_cpos;
                vcn_len = 1;
                lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ rl_not_mapped_enoent:
                 */
                if (likely(vcn + vcn_len >= cend)) {
                        up_write(&ni->runlist.lock);
-                       rl_write_locked = FALSE;
+                       rl_write_locked = false;
                        rl = NULL;
                }
                goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ rl_not_mapped_enoent:
        if (likely(!err)) {
                if (unlikely(rl_write_locked)) {
                        up_write(&ni->runlist.lock);
-                       rl_write_locked = FALSE;
+                       rl_write_locked = false;
                } else if (unlikely(rl))
                        up_read(&ni->runlist.lock);
                rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
        do {
                s64 bh_pos;
                struct page *page;
-               BOOL partial;
+               bool partial;
 
                page = pages[u];
                bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
                bh = head = page_buffers(page);
-               partial = FALSE;
+               partial = false;
                do {
                        s64 bh_end;
 
                        bh_end = bh_pos + blocksize;
                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh))
-                                       partial = TRUE;
+                                       partial = true;
                        } else {
                                set_buffer_uptodate(bh);
                                mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                 */
                                down_read(&ni->runlist.lock);
                                lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-                                               vol->cluster_size_bits, FALSE);
+                                               vol->cluster_size_bits, false);
                                up_read(&ni->runlist.lock);
                                if (unlikely(lcn < LCN_HOLE)) {
                                        status = -EIO;
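A note on the lock handling visible in the rl_write_locked hunks above: ni->runlist.lock is an rw_semaphore, and rw_semaphores cannot be upgraded from read to write atomically, so the code drops the read lock, takes the write lock, and jumps back to retry_remap to revalidate its view of the runlist, remembering which mode it holds in rl_write_locked. A sketch of the same drop-upgrade-recheck idiom in userspace, using POSIX rwlocks (the cached_value/populate_cache names are hypothetical, not from the patch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cached_value = -1;	/* -1 means "not populated yet" */

/* Hypothetical slow path; caller must hold the write lock. */
static void populate_cache(void)
{
	cached_value = 42;
}

static int lookup(void)
{
	bool write_locked = false;
	int val;

	pthread_rwlock_rdlock(&map_lock);
retry:
	if (cached_value < 0) {
		if (!write_locked) {
			/*
			 * No atomic read-to-write upgrade exists (true for
			 * POSIX rwlocks and kernel rw_semaphores alike), so
			 * drop the read lock, take the write lock, and
			 * RECHECK: another thread may have populated the
			 * cache in the unlocked window.
			 */
			pthread_rwlock_unlock(&map_lock);
			pthread_rwlock_wrlock(&map_lock);
			write_locked = true;
			goto retry;
		}
		populate_cache();
	}
	val = cached_value;
	pthread_rwlock_unlock(&map_lock);
	return val;
}

int main(void)
{
	printf("%d\n", lookup());
	return 0;
}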