void throttle_vm_writeout(gfp_t gfp_mask)
{
        unsigned long background_thresh;
        unsigned long dirty_thresh;

        for ( ; ; ) {
                global_dirty_limits(&background_thresh, &dirty_thresh);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                /*
                 * The caller might hold locks which can prevent IO completion
                 * or progress in the filesystem.  So we cannot just sit here
                 * waiting for IO to complete.
                 */
                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
                        break;
        }
}
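/*
 * Worked example of the 10% boost above: with dirty_thresh = 1000 pages,
 * an allocator keeps going while NR_UNSTABLE_NFS + NR_WRITEBACK <= 1100,
 * i.e. it gets 10% of headroom over ordinary writers before being
 * throttled.  Below is a hedged sketch of a caller; in mainline kernels
 * of this era the call sits in the reclaim path (mm/vmscan.c) using the
 * scan control's gfp_mask, and the struct/field names here follow that
 * convention as an assumption of this sketch, not something taken from
 * this source.
 */
static void example_reclaim_throttle(struct scan_control *sc)
{
        /*
         * sc->gfp_mask tells throttle_vm_writeout() whether this context
         * may safely wait on filesystem/IO progress (__GFP_FS|__GFP_IO).
         */
        throttle_vm_writeout(sc->gfp_mask);
}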
unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
        if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
                        ((unsigned long)to) < TASK_SIZE) {
                if (in_atomic())
                        return n;

                while (n) {
                        unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
                        unsigned long len = PAGE_SIZE - offset;
                        int retval;
                        struct page *pg;
                        void *maddr;

                        if (len > n)
                                len = n;

survive:
                        down_read(&current->mm->mmap_sem);
                        retval = get_user_pages(current, current->mm,
                                        (unsigned long)to, 1, 1, 0, &pg, NULL);

                        if (retval == -ENOMEM && is_global_init(current)) {
                                up_read(&current->mm->mmap_sem);
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto survive;
                        }

                        if (retval != 1) {
                                up_read(&current->mm->mmap_sem);
                                break;
                        }

                        maddr = kmap_atomic(pg, KM_USER0);
                        memcpy(maddr + offset, from, len);
                        kunmap_atomic(maddr, KM_USER0);
                        set_page_dirty_lock(pg);
                        put_page(pg);
                        up_read(&current->mm->mmap_sem);

                        from += len;
                        to += len;
                        n -= len;
                }
                return n;
        }
#endif
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
        return n;
}
static int fat_file_release(struct inode *inode, struct file *filp)
{
        if ((filp->f_mode & FMODE_WRITE) &&
             MSDOS_SB(inode->i_sb)->options.flush) {
                fat_flush_inodes(inode->i_sb, inode, NULL);
                congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
        return 0;
}
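/*
 * For context (not part of the source above): the options.flush test is
 * driven by FAT's "flush" mount option, which requests eager writeout on
 * close so removable media can be pulled shortly afterwards.  A typical
 * invocation -- the device node and mount point are placeholders -- would
 * look like:
 *
 *      mount -t vfat -o flush /dev/sdX1 /mnt/usb
 */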
unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
        if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
                        ((unsigned long)to) < TASK_SIZE) {
                /*
                 * When we are in an atomic section (see
                 * mm/filemap.c:file_read_actor), return the full
                 * length to take the slow path.
                 */
                if (in_atomic())
                        return n;

                /*
                 * CPU does not honor the WP bit when writing
                 * from supervisory mode, and due to preemption or SMP,
                 * the page tables can change at any time.
                 * Do it manually.      Manfred <*****@*****.**>
                 */
                while (n) {
                        unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
                        unsigned long len = PAGE_SIZE - offset;
                        int retval;
                        struct page *pg;
                        void *maddr;

                        if (len > n)
                                len = n;

survive:
                        down_read(&current->mm->mmap_sem);
                        retval = get_user_pages(current, current->mm,
                                        (unsigned long)to, 1, 1, 0, &pg, NULL);

                        if (retval == -ENOMEM && is_global_init(current)) {
                                up_read(&current->mm->mmap_sem);
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto survive;
                        }

                        if (retval != 1) {
                                up_read(&current->mm->mmap_sem);
                                break;
                        }

                        maddr = kmap_atomic(pg);
                        memcpy(maddr + offset, from, len);
                        kunmap_atomic(maddr);
                        set_page_dirty_lock(pg);
                        put_page(pg);
                        up_read(&current->mm->mmap_sem);

                        from += len;
                        to += len;
                        n -= len;
                }
                return n;
        }
#endif
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
        return n;
}
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
retry:
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode)) {
                if (PTR_ERR(inode) == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
        }
        return inode;
}
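/*
 * The retry shape above recurs throughout this section (see also
 * do_recover_data() and start_this_handle() below): on -ENOMEM, back off
 * in congestion_wait(BLK_RW_ASYNC, HZ/50) and retry instead of failing.
 * A hedged, illustrative distillation of that pattern follows; the macro
 * name and the use of a GNU statement expression are inventions of this
 * sketch, not kernel API, and it only fits calls that return an int error
 * (pointer-returning calls such as f2fs_iget() need PTR_ERR() first):
 */
#define RETRY_ON_ENOMEM(expr)                                   \
({                                                              \
        long __ret;                                             \
        while ((__ret = (long)(expr)) == -ENOMEM)               \
                congestion_wait(BLK_RW_ASYNC, HZ/50);           \
        __ret;                                                  \
})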
/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        else
                del_timer(&wb_timer);
        return 0;
}
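/*
 * Usage note (illustrative, not part of the source): the handler above
 * backs the /proc/sys/vm/dirty_writeback_centisecs sysctl named in its
 * comment, so the kupdate period can be tuned at runtime from a shell:
 *
 *      # write back "old" dirty data every 5 seconds
 *      echo 500 > /proc/sys/vm/dirty_writeback_centisecs
 *
 *      # writing 0 disables the periodic timer (the del_timer path above)
 *      echo 0 > /proc/sys/vm/dirty_writeback_centisecs
 */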
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable, bdi_nr_reclaimable;
        long nr_writeback, bdi_nr_writeback;
        long background_thresh;
        long dirty_thresh;
        long bdi_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh,
                                &bdi_thresh, bdi);

                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_writeback = global_page_state(NR_WRITEBACK);

                bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;

                /*
                 * Throttle it only when the background writeback cannot
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
                if (nr_reclaimable + nr_writeback <
                                (background_thresh + dirty_thresh) / 2)
                        break;

                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (bdi_nr_reclaimable) {
                        writeback_inodes(&wbc);
                        pages_written += write_chunk - wbc.nr_to_write;
                        get_dirty_limits(&background_thresh, &dirty_thresh,
                                       &bdi_thresh, bdi);
                }

                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * to ensure we accurately count the 'dirty' pages when
                 * the threshold is low.
                 *
                 * Otherwise it would be possible to get thresh+n pages
                 * reported dirty, even though there are thresh-m pages
                 * actually dirty; with m+n sitting in the percpu
                 * deltas.
                 */
                if (bdi_thresh < 2*bdi_stat_error(bdi)) {
                        bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
                        bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
                } else if (bdi_nr_reclaimable) {
                        bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                        bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
                }

                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;
                if (pages_written >= write_chunk)
                        break;          /* We've done our duty */

                congestion_wait(WRITE, HZ/10);
        }

        if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
                        bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
                        (!laptop_mode &&
                        (global_page_state(NR_FILE_DIRTY)
                               + global_page_state(NR_UNSTABLE_NFS)
                                          > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
        if (set_page_dirty(page) || page_mkwrite) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

void throttle_vm_writeout(gfp_t gfp_mask)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                congestion_wait(WRITE, HZ/10);

                /*
                 * The caller might hold locks which can prevent IO completion
                 * or progress in the filesystem.  So we cannot just sit here
                 * waiting for IO to complete.
                 */
                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
                        break;
        }
}
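/*
 * For reference (a sketch, not taken from this source): callers that dirty
 * a single page typically go through a one-page wrapper such as the
 * balance_dirty_pages_ratelimited() used by set_page_dirty_balance()
 * above.  In kernels of this era it is, to the best of our knowledge, a
 * thin inline in include/linux/writeback.h along these lines:
 */
static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
        balance_dirty_pages_ratelimited_nr(mapping, 1);
}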
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page)
{
        struct dnode_of_data dn;
        struct node_info ni;
        unsigned int start, end;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                f2fs_recover_inline_xattr(inode, page);
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = f2fs_recover_xattr_data(inode, page);
                if (!err)
                        recovered++;
                goto out;
        }

        /* step 2: recover inline data */
        if (f2fs_recover_inline_data(inode, page))
                goto out;

        /* step 3: recover data indices */
        start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
        end = start + ADDRS_PER_PAGE(page, inode);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
        err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                if (err == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry_dn;
                }
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

        err = f2fs_get_node_info(sbi, dn.nid, &ni);
        if (err)
                goto err;

        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

        if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
                f2fs_msg(sbi->sb, KERN_WARNING,
                        "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
                        inode->i_ino, ofs_of_node(dn.node_page),
                        ofs_of_node(page));
                err = -EFAULT;
                goto err;
        }

        for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;

                src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

                if (__is_valid_data_blkaddr(src) &&
                        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
                        err = -EFAULT;
                        goto err;
                }

                if (__is_valid_data_blkaddr(dest) &&
                        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
                        err = -EFAULT;
                        goto err;
                }

                /* skip recovering if dest is the same as src */
                if (src == dest)
                        continue;

                /* dest is invalid, just invalidate src block */
                if (dest == NULL_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        continue;
                }

                if (!file_keep_isize(inode) &&
                        (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
                        f2fs_i_size_write(inode,
                                (loff_t)(start + 1) << PAGE_SHIFT);

                /*
                 * dest is reserved block, invalidate src block
                 * and then reserve one new block in dnode page.
                 */
                if (dest == NEW_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        f2fs_reserve_new_block(&dn);
                        continue;
                }

                /* dest is valid block, try to recover from src to dest */
                if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

                        if (src == NULL_ADDR) {
                                err = f2fs_reserve_new_block(&dn);
                                while (err &&
                                       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
                                        err = f2fs_reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                                if (err)
                                        goto err;
                        }
retry_prev:
                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err) {
                                if (err == -ENOMEM) {
                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                                        goto retry_prev;
                                }
                                goto err;
                        }

                        /* write dummy data page */
                        f2fs_replace_block(sbi, &dn, src, dest,
                                                ni.version, false, false);
                        recovered++;
                }
        }

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
out:
        f2fs_msg(sbi->sb, KERN_NOTICE,
                "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
                inode->i_ino,
                file_keep_isize(inode) ? "keep" : "recover",
                recovered, err);
        return err;
}
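/*
 * Summary of the per-index (src, dest) case analysis in step 3 above,
 * derived directly from the code and added here for readability:
 *
 *      src == dest                  -> nothing to recover, skip
 *      dest == NULL_ADDR            -> invalidate the src block
 *      dest == NEW_ADDR             -> invalidate src, reserve a new block
 *      dest valid, src == NULL_ADDR -> reserve a block, then recover
 *      dest valid otherwise         -> recover src -> dest via
 *                                      f2fs_replace_block()
 *
 * Only the last two cases bump "recovered"; both -ENOMEM retry points
 * (retry_dn, retry_prev) back off in congestion_wait() before looping.
 */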
static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t   *transaction, *new_transaction = NULL;
        tid_t           tid;
        int             needed, need_to_start;
        int             nblocks = handle->h_buffer_credits;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                return -ENOSPC;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kmem_cache_alloc(transaction_cache,
                                                   gfp_mask | __GFP_ZERO);
                if (!new_transaction) {
                        /*
                         * A caller without __GFP_FS may be running from
                         * inside the fs writeback path, where failing is
                         * not an option; retry the allocation after a
                         * short congestion backoff instead.
                         */
                        if ((gfp_mask & __GFP_FS) == 0) {
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto alloc_transaction;
                        }
                        return -ENOMEM;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (journal->j_barrier_count) {
                        printk(KERN_WARNING
                               "JBD: %s: wait for transaction barrier\n",
                               __func__);
                        write_unlock(&journal->j_state_lock);
                        goto repeat;
                }
                if (!journal->j_running_transaction) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        /*
         * If the current transaction is locked down for commit, wait for
         * the lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                read_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        needed = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * The current transaction is already too large: back out
                 * our credits, kick off a commit, and retry against the
                 * next transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                tid = transaction->t_tid;
                need_to_start = !tid_geq(journal->j_commit_request, tid);
                read_unlock(&journal->j_state_lock);
                if (need_to_start)
                        jbd2_log_start_commit(journal, tid);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * Not enough log space for all potential buffers of this handle:
         * stall pending a checkpoint to free some more log space.
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                write_lock(&journal->j_state_lock);
                if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        /* OK: attach the handle to the running transaction. */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  __jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);

        lock_map_acquire(&handle->h_lockdep_map);
        jbd2_journal_free_transaction(new_transaction);
        return 0;
}
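/*
 * Illustrative caller (a sketch assuming the jbd2 API of the same era,
 * not taken from this source): filesystems do not call start_this_handle()
 * directly; they obtain a handle via jbd2_journal_start(), which allocates
 * the handle and funnels into start_this_handle().  The credit count and
 * function body below are placeholders for illustration only.
 */
static int example_journalled_update(journal_t *journal)
{
        handle_t *handle;

        handle = jbd2_journal_start(journal, 1);        /* 1 buffer credit */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* ... dirty journalled metadata under the handle here ... */

        return jbd2_journal_stop(handle);
}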