-// it is called by get_block when create == 0. Returns block number
-// for 'block'-th logical block of file. When it hits direct item it
-// returns 0 (being called from bmap) or read direct item into piece
-// of page (bh_result)
+/*
+** Get block number from the indirect item by position.
+*/
+static inline long iitem_get_blocknr (struct path *path, int pos)
+{
+ struct buffer_head * bh = get_last_bh (path);
+ struct item_head * ih = get_ih (path);
+ __u32 * ind_item;
-// Please improve the english/clarity in the comment above, as it is
-// hard to understand.
+ if (is_indirect_le_ih (ih)) {
+ ind_item = (__u32 *)B_I_PITEM (bh, ih);
+ return le32_to_cpu(ind_item [path->pos_in_item + pos]);
+ }
-static int _get_block_create_0 (struct inode * inode, long block,
- struct buffer_head * bh_result,
- int args)
+ return 0;
+}
+
+/*
+** Get the indirect item size.
+*/
+static inline int iitem_size (struct path *path)
{
- INITIALIZE_PATH (path);
- struct cpu_key key;
- struct buffer_head * bh;
- struct item_head * ih, tmp_ih;
- int fs_gen ;
- int blocknr;
- char * p = NULL;
- int chars;
- int ret ;
- int done = 0 ;
- unsigned long offset ;
+ struct item_head * ih = get_ih (path);
+ return (I_UNFM_NUM(ih) - (path->pos_in_item + 1));
+}
- // prepare the key to look for the 'block'-th block of file
- make_cpu_key (&key, inode,
- (loff_t)block * inode->i_sb->s_blocksize + 1, TYPE_ANY, 3);
+/*
+** Return "1" if the last position of the indirect item has been reached,
+** "0" otherwise.
+*/
+static inline int last_pos_of_iitem (struct path *path, int pos)
+{
+ struct item_head * ih = get_ih (path);
+ return ((path->pos_in_item + 1 + pos) >= (I_UNFM_NUM(ih)) ? 1 : 0);
+}
-research:
- if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND) {
- pathrelse (&path);
- if (p)
- kunmap(bh_result->b_page) ;
- // We do not return -ENOENT if there is a hole but page is uptodate, because it means
- // That there is some MMAPED data associated with it that is yet to be written to disk.
- if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
- return -ENOENT ;
+/*
+** Get the number of contiguous blocks in the indirect item
+** from given pos to the end of the item.
+*/
+static inline int iitem_amount_contiguous (struct path *path, int pos)
+{
+ long curr = 0;
+ long next = 0;
+ int item_size = iitem_size(path);
+ int amount = 1;
+
+ if (pos >= item_size) {
+ return 0;
+ }
+ curr = iitem_get_blocknr(path, pos++);
+
+ if (curr==0) {
+ while (pos <= item_size) {
+ next = iitem_get_blocknr(path, pos++);
+ if (next != 0) break;
+ amount++;
+ }
+ return amount;
+ }
+
+ while (pos <= item_size) {
+ next = iitem_get_blocknr(path, pos++);
+ if ((next - curr) != 1) break;
+ curr = next;
+ amount++;
+ }
+
+ return amount;
+}
+
+/*
+** Return "1" if fs changed and item moved.
+*/
+static inline int need_research (int fs_gen, struct super_block * sb,
+ struct item_head * ih, struct path * path )
+{
+ return (fs_changed(fs_gen, sb) && item_moved(ih, path));
+}
+
+/*
+** Calculate the iicache array size (asize).
+*/
+static inline int calc_iicache_asize (struct inode * inode, long block,
+ struct path * path, struct cpu_key * key)
+{
+ long blocknr=0, blk=block;
+ int pos=0;
+ int amount=0,i=0;
+
+ long file_size = inode->i_size >> inode->i_blkbits;
+
+ struct super_block * sb = inode->i_sb;
+ struct item_head * ih = get_ih (path);
+
+ if (file_size > 8) file_size=8;
+
+ for (i=0; i<file_size; i++) {
+
+ amount = iitem_amount_contiguous (path, pos);
+
+ if (amount==0) {
+ break;
+ }
+
+ pos += amount;
+ blk += amount;
+
+ if (pos <= last_pos_of_iitem(path, pos)) {
+ continue;
+ }
+
+ if((blk * sb->s_blocksize) < inode->i_size) {
+ if ((i+1) < file_size) {
+ set_cpu_key_k_offset (key, cpu_key_k_offset(key) + pos * sb->s_blocksize);
+
+ if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {
+ break;
+ }
+
+ ih = get_ih (path);
+ if (!is_indirect_le_ih(ih) ||
+ (le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size) {
+ break ;
}
- return 0 ;
+ pos=0; amount=0;
+
+ }
}
-
- //
- bh = get_last_bh (&path);
- ih = get_ih (&path);
- if (is_indirect_le_ih (ih)) {
- __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+
+ }
+
+ return i;
+
+}
+
+/* Fill indirect item cache.
+** Put N block numbers from current indirect item.
+*/
+static inline void iicache_fill (struct inode * inode, long block,
+ struct path * path, struct cpu_key * key)
+{
+ long blocknr=0, blk=block;
+ int pos=0;
+ int amount=0,i=0;
+ long file_size = inode->i_size >> inode->i_blkbits;
+ int asize = (file_size > 1012)?(file_size/1012+8):(file_size/8 + 1);
+ int iic_size = (sizeof(struct iicache)) * asize;
+ struct super_block * sb = inode->i_sb;
+ struct item_head * ih = get_ih (path);
+ struct cpu_key key1;
+ unsigned long offset ;
+
+ offset = block * sb->s_blocksize + 1;
+ make_cpu_key (&key1, inode, (loff_t)offset, TYPE_ANY, 3);
+
+ asize = calc_iicache_asize(inode, block, path, &key1);
+
+ if (asize ==0) asize=1;
+ if (asize > 8) asize=8;
+
+ iic_size = (sizeof(struct iicache)) * 8; // * asize;
+
+ if (inode->u.reiserfs_i.iic==NULL) {
+ inode->u.reiserfs_i.iic = (struct iicache *)kmalloc(iic_size, GFP_NOFS);
+ if (inode->u.reiserfs_i.iic==NULL) {
+ return;
+ }
+ iicache_set_asize(inode, asize);
+ }
+ iicache_clear(inode);
+
+ if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {
+ return;
+ }
+
+ for (i=0; i<iicache_get_asize(inode); i++) {
+
+ amount = iitem_amount_contiguous (path, pos);
+ blocknr = iitem_get_blocknr (path, pos);
+
+ if ((amount>0) && (amount<=1012)) {
+ iicache_set (inode, amount, IICACHE_SIZE, i);
+ iicache_set (inode, blk, IICACHE_BLOCK, i);
+ iicache_set (inode, blocknr, IICACHE_BLOCKNR,i);
+ } else {
+ break;
+ }
+
+ pos += amount;
+ blk += amount;
+
+ if (pos <= last_pos_of_iitem(path, pos)) continue;
+
+ if((blk * sb->s_blocksize) < inode->i_size) {
+ if ((i+1) < iicache_get_asize(inode)) {
+ set_cpu_key_k_offset (key, cpu_key_k_offset(key) + pos * sb->s_blocksize);
+
+ if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {
+ break;
+ }
+
+ ih = get_ih (path);
+ if (!is_indirect_le_ih(ih) ||
+ (le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size) {
+ break ;
+ }
+ pos=0; amount=0;
- /* FIXME: here we could cache indirect item or part of it in
- the inode to avoid search_by_key in case of subsequent
- access to file */
- blocknr = get_block_num(ind_item, path.pos_in_item) ;
- ret = 0 ;
- if (blocknr) {
- bh_result->b_dev = inode->i_dev;
- bh_result->b_blocknr = blocknr;
- bh_result->b_state |= (1UL << BH_Mapped);
- } else
- // We do not return -ENOENT if there is a hole but page is uptodate, because it means
- // That there is some MMAPED data associated with it that is yet to be written to disk.
- if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
- ret = -ENOENT ;
- }
+ }
+ }
+ }
+
+ if (i < iicache_get_asize(inode)) {
+ iicache_clear_from_pos(inode, i);
+ }
+
+}
+
+/*
+** Truncate indirect item cache.
+*/
+static inline void iicache_truncate (struct inode * inode)
+{
+ long new_file_end = inode->i_size >> inode->i_blkbits;
+ long last_cached, truncate_size, ii_size=0, n=0;
+ int i=0;
+
+ if (inode->u.reiserfs_i.iic==NULL) return;
+
+ if (iicache_size(inode,0)) {
+ if (new_file_end <= iicache_first_cached(inode,0)) {
+ iicache_clear(inode);
+ return;
+ }
+ if ((n=block_is_iicached(inode, new_file_end))) {
+ last_cached = iicache_last_cached(inode, n-1);
+
+ if (iicache_size(inode,n) && (new_file_end <= last_cached)) {
+ truncate_size = last_cached - new_file_end + 1;
+ ii_size = iicache_get (inode, IICACHE_SIZE, n-1);
+ ii_size -= truncate_size;
+ iicache_set (inode, ii_size, IICACHE_SIZE, n-1);
+ i=n;
+ while(i<iicache_get_asize(inode)) {
+ iicache_set (inode, 0, IICACHE_SIZE, i++);
+ }
+
+ }
- pathrelse (&path);
- if (p)
- kunmap(bh_result->b_page) ;
- return ret ;
}
+ }
+}
+
+
+/*
+** Helper function for _get_block_create_0
+*/
+static inline int iitem_map_indirect_block (struct path * path, struct inode * inode,
+ long block, struct buffer_head * bh_result,
+ int args, struct cpu_key * key)
+{
+ struct buffer_head * bh = get_last_bh (path);
+ struct item_head * ih = get_ih (path);
+ __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih);
+ int blocknr= get_block_num(ind_item, path->pos_in_item) ;
+
+ // We do not return -ENOENT if there is a hole but the page is uptodate, because that
+ // means there is some mmapped data associated with it that is yet to be written to disk.
+ if (!blocknr && (args & GET_BLOCK_NO_HOLE)&& !Page_Uptodate(bh_result->b_page)) {
+ return -ENOENT ;
+ }
+
+ // map the found block
+ set_block_dev_mapped (bh_result, blocknr, inode);
+
+ return 0;
+}
+
+
+
+/*
+** Helper function for _get_block_create_0
+*/
+static inline void path_relse_page_unmap (struct path * path, char * p,
+ struct page * page) {
+ pathrelse(path);
+ if (p)
+ kunmap(page);
+}
+
+/*
+** Handle Indirect Item case and simple direct case.
+** "gbc0" stands for "get_block_create_0"
+*/
+static inline int gbc0_indirect_case (char * p, struct path * path,
+ struct inode *inode, long block,
+ struct buffer_head * bh_result,
+ int args, struct cpu_key * key)
+{
+ struct super_block * sb = inode->i_sb;
+ struct page * page = bh_result->b_page;
+ struct item_head * ih = get_ih (path);
+ int ret=0;
+
+ // requested data are in indirect item(s)
+ if (is_indirect_le_ih (ih)) {
+
+ ret = iitem_map_indirect_block (path, inode, block, bh_result, args, key);
+ if (ret<0) {
+ path_relse_page_unmap (path, p, page);
+ return ret;
+ }
+
+ if (p)
+ kunmap(page);
+
+ /*
+ ** Here we fill the indirect item cache, or part of it,
+ ** in the inode to avoid search_by_key in case of
+ ** subsequent accesses to the file.
+ */
+ // if "iicache" mount option is used
+ if (reiserfs_iicache(sb)) {
+ iicache_fill (inode, block, path, key);
+ }
+ pathrelse(path);
+ //path_relse_page_unmap (path, p, page);
+ return 0 ;
+ }
+
+ return 1;
+}
+
+/*
+** Direct Item case start.
+** "gbc0" stands for "get_block_create_0"
+*/
+static inline int gbc0_direct_case_start (char * p, struct path * path,
+ struct inode *inode,
+ struct buffer_head * bh_result,
+ int args)
+{
+ struct page * page = bh_result->b_page;
// requested data are in direct item(s)
if (!(args & GET_BLOCK_READ_DIRECT)) {
- // we are called by bmap. FIXME: we can not map block of file
- // when it is stored in direct item(s)
- pathrelse (&path);
- if (p)
- kunmap(bh_result->b_page) ;
- return -ENOENT;
+ // we are called by bmap. FIXME: we can not map block of file
+ // when it is stored in direct item(s)
+ path_relse_page_unmap (path, p, page);
+ return -ENOENT;
}
/* if we've got a direct item, and the buffer was uptodate,
@@ -324,90 +598,207 @@
** end, where we map the buffer and return
*/
if (buffer_uptodate(bh_result)) {
- goto finished ;
- } else
- /*
- ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
- ** pages without any buffers. If the page is up to date, we don't want
- ** read old data off disk. Set the up to date bit on the buffer instead
- ** and jump to the end
- */
- if (Page_Uptodate(bh_result->b_page)) {
- mark_buffer_uptodate(bh_result, 1);
- goto finished ;
+ set_block_dev_mapped (bh_result, 0, inode);
+ path_relse_page_unmap (path, p, page);
+ return 0;
+ } else {
+ /*
+ ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
+ ** pages without any buffers. If the page is up to date, we don't want
+ ** read old data off disk. Set the up to date bit on the buffer instead
+ ** and jump to the end
+ */
+ if (Page_Uptodate(bh_result->b_page)) {
+ mark_buffer_uptodate(bh_result, 1);
+ set_block_dev_mapped (bh_result, 0, inode);
+ path_relse_page_unmap (path, p, page);
+ return 0;
+ }
}
+ return 1;
+}
- // read file tail into part of page
- offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
- fs_gen = get_generation(inode->i_sb) ;
- copy_item_head (&tmp_ih, ih);
-
- /* we only want to kmap if we are reading the tail into the page.
- ** this is not the common case, so we don't kmap until we are
- ** sure we need to. But, this means the item might move if
- ** kmap schedules
+/*
+** Handle Direct Item case.
+** "gbc0" stands for "get_block_create_0"
+*/
+static inline void gbc0_direct_case (char * p, struct path * path,
+ struct inode *inode,
+ struct cpu_key * key)
+{
+ struct buffer_head * bh;
+ struct super_block * sb = inode->i_sb;
+ struct item_head * ih = get_ih (path);
+ int chars=0, done=0;
+
+ do {
+ if (!is_direct_le_ih (ih)) {
+ BUG ();
+ }
+ /* make sure we don't read more bytes than actually exist in
+ ** the file. This can happen in odd cases where i_size isn't
+ ** correct, and when direct item padding results in a few
+ ** extra bytes at the end of the direct item
*/
- if (!p) {
- p = (char *)kmap(bh_result->b_page) ;
- if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
- goto research;
- }
+ if ((le_ih_k_offset(ih) + path->pos_in_item) > inode->i_size)
+ break ;
+
+ if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
+ chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path->pos_in_item;
+ done = 1 ;
+ } else {
+ chars = ih_item_len(ih) - path->pos_in_item;
}
- p += offset ;
- memset (p, 0, inode->i_sb->s_blocksize);
- do {
- if (!is_direct_le_ih (ih)) {
- BUG ();
- }
- /* make sure we don't read more bytes than actually exist in
- ** the file. This can happen in odd cases where i_size isn't
- ** correct, and when direct item padding results in a few
- ** extra bytes at the end of the direct item
- */
- if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
- break ;
- if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
- chars = inode->i_size - (le_ih_k_offset(ih) - 1) - path.pos_in_item;
- done = 1 ;
- } else {
- chars = ih_item_len(ih) - path.pos_in_item;
- }
- memcpy (p, B_I_PITEM (bh, ih) + path.pos_in_item, chars);
- if (PATH_LAST_POSITION (&path) != (B_NR_ITEMS (bh) - 1))
- // we done, if read direct item is not the last item of
- // node FIXME: we could try to check right delimiting key
- // to see whether direct item continues in the right
- // neighbor or rely on i_size
- break;
+ p += chars;
- // update key to look for the next piece
- set_cpu_key_k_offset (&key, cpu_key_k_offset (&key) + chars);
- if (search_for_position_by_key (inode->i_sb, &key, &path) != POSITION_FOUND)
- // we read something from tail, even if now we got IO_ERROR
- break;
- bh = get_last_bh (&path);
- ih = get_ih (&path);
- } while (1);
+ if (PATH_LAST_POSITION (path) != (B_NR_ITEMS (bh) - 1))
+ // we done, if read direct item is not the last item of
+ // node FIXME: we could try to check right delimiting key
+ // to see whether direct item continues in the right
+ // neighbor or rely on i_size
+ break;
- flush_dcache_page(bh_result->b_page) ;
- kunmap(bh_result->b_page) ;
+ // update key to look for the next piece
+ set_cpu_key_k_offset (key, cpu_key_k_offset(key) + chars);
-finished:
- pathrelse (&path);
- bh_result->b_blocknr = 0 ;
- bh_result->b_dev = inode->i_dev;
+ if (search_for_position_by_key (sb, key, path) != POSITION_FOUND)
+ // we read something from tail, even if now we got IO_ERROR
+ break;
+
+ bh = get_last_bh (path);
+ ih = get_ih (path);
+
+ } while (1);
+
+}
+
+
+/*
+** Helper function for _get_block_create_0
+** Check iicache.
+** If needed block is in iicache we map it and return "1".
+*/
+static int check_iicache (struct inode * inode, long block,
+ struct buffer_head * bh_result, int w_flag)
+{
+ struct super_block * sb = inode->i_sb;
+ int n=0, block_nr=0;
+
+ /*
+ ** Here we use the indirect item cache.
+ ** By getting the unfm_block number from the cache
+ ** we try to avoid some of the search_by_key() calls.
+ */
+
+ // if "iicache" mount option is used
+ if (reiserfs_iicache(sb)) {
+ if (inode->u.reiserfs_i.iic==NULL) {
+ return 0;
+ }
+ // Check the iicache and get the iicache array index + 1
+ // at which the block_nr corresponding to the given logical
+ // block can be found.
+ n = block_is_iicached(inode, block);
+
+ // If the iicache is not empty for this file and the
+ // requested logical block of the file is cached, then
+ // we return the corresponding block number.
+ if (n>0) {
+ block_nr = iicache_get_blocknr_by_block(inode, block, n-1);
+
+ if (w_flag && block_nr==0) return 0; /* do not write to hole */
+
+ if ((block_nr >= 0)) {
+ set_block_dev_mapped (bh_result, block_nr, inode);
+ //printk("n=%i, block=%li, block_nr=%i\n", n-1, block, block_nr);
+ //iicache_print(inode);
+ return 1;
+ }
+ }
+
+ }
+ return 0;
+}
+
+//
+// Called by reiserfs_get_block when create == 0.
+// Returns the disk block number for the given logical block of the file.
+//
+// When it hits a direct item it returns 0 (when called from bmap)
+// or reads the direct item into a piece of the page (bh_result).
+//
+static int _get_block_create_0 (struct inode * inode, long block,
+ struct buffer_head * bh_result,
+ int args)
+{
+ INITIALIZE_PATH (path);
+ struct cpu_key key;
+ struct item_head * ih, tmp_ih;
+ struct super_block * sb = inode->i_sb;
+ struct page * page = bh_result->b_page;
+ char * p = NULL;
+ unsigned long offset ;
+ int fs_gen=0, ret=0, block_iicached=0;
+
+
+ block_iicached = check_iicache (inode, block, bh_result, 0);
+ if (block_iicached) {
+ return 0;
+ }
+
+ // prepare the key to look for the 'block'-th block of file
+ offset = block * sb->s_blocksize + 1;
+ make_cpu_key (&key, inode, (loff_t)offset, TYPE_ANY, 3);
+
+ do {
+
+ if (search_for_position_by_key (sb, &key, &path) != POSITION_FOUND) {
+ path_relse_page_unmap (&path, p, page);
+ // We do not return -ENOENT if there is a hole but the page is uptodate, because that
+ // means there is some mmapped data associated with it that is yet to be written to disk.
+ return (((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page)) ? (-ENOENT) : 0 ) ;
+ }
+
+ // check and handle indirect case
+ ret = gbc0_indirect_case (p, &path, inode, block, bh_result, args, &key);
+ if (ret <= 0)
+ return ret;
+
+ // start the direct case
+ ret = gbc0_direct_case_start (p, &path, inode, bh_result, args);
+ if (ret <= 0)
+ return ret;
+
+ // we should read the file tail into part of page.
+ offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1) ;
+ fs_gen = get_generation(sb) ;
+ ih = get_ih (&path);
+ copy_item_head (&tmp_ih, ih);
+ if (!p)
+ p=(char *)kmap(page);
+
+ } while (need_research(fs_gen, sb, &tmp_ih, &path));
+
+ // ok, we have direct item and kmapped page,
+ // do copy from direct item to page now.
+ p += offset;
+ memset (p, 0, sb->s_blocksize);
+ gbc0_direct_case (p, &path, inode, &key);
+
+ flush_dcache_page(page) ;
+ path_relse_page_unmap (&path, p, page);
+ set_block_dev_mapped (bh_result, 0, inode);
mark_buffer_uptodate (bh_result, 1);
- bh_result->b_state |= (1UL << BH_Mapped);
return 0;
}
-
// this is called to create file map. So, _get_block_create_0 will not
// read direct item
int reiserfs_bmap (struct inode * inode, long block,
@@ -560,10 +951,13 @@
struct cpu_key key;
struct buffer_head * bh, * unbh = 0;
struct item_head * ih, tmp_ih;
+ struct super_block * sb = inode->i_sb;
__u32 * item;
int done;
int fs_gen;
int windex ;
+ int block_iicached=0;
+
struct reiserfs_transaction_handle th ;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -590,6 +984,7 @@
return -EFBIG;
}
+
/* if !create, we aren't changing the FS, so we don't need to
** log anything, so we don't need to start a transaction
*/
@@ -601,19 +996,34 @@
unlock_kernel() ;
return ret;
}
+#if 0
+ /*
+ ** If the iicache has the needed disk block number and it is not
+ ** a hole, we return it from the iicache.
+ */
+ if (reiserfs_iicache(sb)) {
+ block_iicached = check_iicache (inode, block, bh_result, 1);
+ if (block_iicached) {
+ unlock_kernel() ;
+ return 0;
+ }
+ }
+#endif
- /* set the key of the first byte in the 'block'-th block of file */
- make_cpu_key (&key, inode, new_offset,
- TYPE_ANY, 3/*key length*/);
+ /* set the key of the first byte
+ in the 'block'-th block of file */
+ make_cpu_key (&key, inode, new_offset, TYPE_ANY, 3);
+
if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
journal_begin(&th, inode->i_sb, jbegin_count) ;
reiserfs_update_inode_transaction(inode) ;
transaction_started = 1 ;
}
+
research:
retval = search_for_position_by_key (inode->i_sb, &key, &path);
@@ -683,14 +1093,14 @@
inode->i_blocks += (inode->i_sb->s_blocksize / 512) ;
reiserfs_update_sd(&th, inode) ;
}
+
set_block_dev_mapped(bh_result, unfm_ptr, inode);
pathrelse (&path);
pop_journal_writer(windex) ;
if (transaction_started)
journal_end(&th, inode->i_sb, jbegin_count) ;
-
unlock_kernel() ;
-
+
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
** transaction
@@ -921,6 +1331,10 @@
/* we want the offset for the first byte after the end of the file */
@@ -1792,6 +2213,7 @@
journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1 ) ;
reiserfs_update_inode_transaction(p_s_inode) ;
windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
+
if (update_timestamps)
/* we are doing real truncate: if the system crashes before the last
transaction of truncating gets committed - on reboot the file
diff -urN v2.4.19p6/fs/reiserfs/procfs.c linux/fs/reiserfs/procfs.c
--- v2.4.19p6/fs/reiserfs/procfs.c Sat Apr 6 01:45:31 2002
+++ linux/fs/reiserfs/procfs.c Mon Apr 15 00:13:07 2002
@@ -149,7 +149,7 @@
r = &sb->u.reiserfs_sb;
len += sprintf( &buffer[ len ],
"state: \t%s\n"
- "mount options: \t%s%s%s%s%s%s%s%s%s%s%s%s\n"
+ "mount options: \t%s%s%s%s%s%s%s%s%s%s%s%s%s\n"
"gen. counter: \t%i\n"
"s_kmallocs: \t%i\n"
"s_disk_reads: \t%i\n"
@@ -192,6 +192,7 @@
reiserfs_hashed_relocation( sb ) ? "UNHASHED_RELOCATION " : "",
reiserfs_test4( sb ) ? "TEST4 " : "",
dont_have_tails( sb ) ? "NO_TAILS " : "TAILS ",
+ reiserfs_iicache( sb ) ? "IICACHE " : "NO_IICACHE ",
replay_only( sb ) ? "REPLAY_ONLY " : "",
reiserfs_dont_log( sb ) ? "DONT_LOG " : "LOG ",
convert_reiserfs( sb ) ? "CONV " : "",
diff -urN v2.4.19p6/fs/reiserfs/super.c linux/fs/reiserfs/super.c
--- v2.4.19p6/fs/reiserfs/super.c Sat Apr 6 01:45:31 2002
+++ linux/fs/reiserfs/super.c Sun Apr 14 23:59:34 2002
@@ -455,6 +455,8 @@
set_bit (REISERFS_HASHED_RELOCATION, mount_options);
} else if (!strcmp (this_char, "test4")) {
set_bit (REISERFS_TEST4, mount_options);
+ } else if (!strcmp (this_char, "iicache")) {
+ set_bit (REISERFS_IICACHE, mount_options);
} else if (!strcmp (this_char, "nolog")) {
reiserfs_warning("reiserfs: nolog mount option not supported yet\n");
} else if (!strcmp (this_char, "replayonly")) {
@@ -549,6 +551,7 @@
/* set options in the super-block bitmask */
SET_OPT( NOTAIL, mount_options, s );
+ SET_OPT( REISERFS_IICACHE, mount_options, s );
SET_OPT( REISERFS_NO_BORDER, mount_options, s );
SET_OPT( REISERFS_NO_UNHASHED_RELOCATION, mount_options, s );
SET_OPT( REISERFS_HASHED_RELOCATION, mount_options, s );
@@ -557,7 +560,7 @@
#undef SET_OPT
#include <linux/list.h>
+
+// The cache for indirect item (iicache).
+struct iicache {
+ long i_cache_blocknr; /* the first of set of contiguous blocknrs */
+ long i_cache_size ; /* the amount of set of contiguous blocknrs */
+ long i_cache_block ; /* the first, cached logical block of file */
+};
/** bitmasks for i_flags field in reiserfs-specific part of inode */
typedef enum {
@@ -46,6 +53,11 @@
** flushed */
unsigned long i_trans_id ;
unsigned long i_trans_index ;
+
+ // The cache for indirect item (iicache).
+ struct iicache * iic;
+ int iic_asize; /* iicache array size */
+ spinlock_t i_cache_lock; /* spinlock to protect iicache changes */
};