dev_t ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi) { __u32 fs32; dev_t dev; if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]); else fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]); switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUNx86: case UFS_ST_SUN: if ((fs32 & 0xffff0000) == 0 || (fs32 & 0xffff0000) == 0xffff0000) dev = old_decode_dev(fs32 & 0x7fff); else dev = MKDEV(sysv_major(fs32), sysv_minor(fs32)); break; default: dev = old_decode_dev(fs32); break; } return dev; }
static int cramfs_iget5_set(struct inode *inode, void *opaque) { static struct timespec zerotime; struct cramfs_inode *cramfs_inode = opaque; inode->i_mode = cramfs_inode->mode; inode->i_uid = cramfs_inode->uid; inode->i_size = cramfs_inode->size; inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; inode->i_gid = cramfs_inode->gid; /* Struct copy intentional */ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime; inode->i_ino = CRAMINO(cramfs_inode); /* inode->i_nlink is left 1 - arguably wrong for directories, but it's the best we can do without reading the directory contents. 1 yields the right result in GNU find, even without -noleaf option. */ if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; inode->i_data.a_ops = &cramfs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &cramfs_dir_inode_operations; inode->i_fop = &cramfs_directory_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &cramfs_aops; } else { inode->i_size = 0; inode->i_blocks = 0; init_special_inode(inode, inode->i_mode, old_decode_dev(cramfs_inode->size)); } return 0; }
/* * The minix V2 function to read an inode. */ static void V2_minix_read_inode(struct inode * inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { make_bad_inode(inode); return; } inode->i_mode = raw_inode->i_mode; inode->i_uid = (uid_t)raw_inode->i_uid; inode->i_gid = (gid_t)raw_inode->i_gid; inode->i_nlink = raw_inode->i_nlinks; inode->i_size = raw_inode->i_size; inode->i_mtime.tv_sec = raw_inode->i_mtime; inode->i_atime.tv_sec = raw_inode->i_atime; inode->i_ctime.tv_sec = raw_inode->i_ctime; inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = 0; for (i = 0; i < 10; i++) minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); }
/* * The minix V2 function to read an inode. */ static struct inode *V2_minix_iget(struct inode *inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { iget_failed(inode); return ERR_PTR(-EIO); } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); set_nlink(inode, raw_inode->i_nlinks); inode->i_size = raw_inode->i_size; inode->i_mtime.tv_sec = raw_inode->i_mtime; inode->i_atime.tv_sec = raw_inode->i_atime; inode->i_ctime.tv_sec = raw_inode->i_ctime; inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = 0; for (i = 0; i < 10; i++) minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); unlock_new_inode(inode); return inode; }
/*
 * sysv_iget - obtain the in-core inode for @ino on @sb.
 *
 * Returns the (possibly cached) inode, or an ERR_PTR on failure:
 * -EIO for an out-of-range inode number or a read error, -ENOMEM if
 * no inode could be allocated.
 */
struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	struct inode *inode;
	unsigned int block;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	/* Already cached and initialised - nothing more to do. */
	if (!(inode->i_state & I_NEW))
		return inode;

	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("Major problem: unable to read inode from dev %s\n",
		       inode->i_sb->s_id);
		goto bad_inode;
	}
	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
	set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
	/* On-disk timestamps are whole seconds. */
	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_blocks = 0;

	si = SYSV_I(inode);
	/*
	 * Block pointers are packed as 3-byte values on disk:
	 * 10 direct + 1 indirect + 1 double + 1 triple.
	 */
	for (block = 0; block < 10+1+1+1; block++)
		read3byte(sbi, &raw_inode->i_data[3*block],
			  (u8 *)&si->i_data[block]);
	brelse(bh);
	si->i_dir_start_lookup = 0;
	/* Device nodes keep their dev_t in the first block slot. */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		sysv_set_inode(inode,
			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
	else
		sysv_set_inode(inode, 0);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
/*
 * Handle the core boot tag: pick up the read-only mount flag and the
 * root device number, but only if the tag actually carries a payload.
 */
static int __init parse_tag_core(const struct tag *tag)
{
	/* A bare header (size <= 2 words) carries no core data. */
	if (tag->hdr.size <= 2)
		return 0;

	if (!(tag->u.core.flags & 1))
		root_mountflags &= ~MS_RDONLY;
	/* Boot loaders pass the root device in old 16-bit encoding. */
	ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	return 0;
}
/*
 * Decode the device number of a special inode from the raw f2fs inode.
 * Old-format 16-bit numbers live in i_addr[0]; a zero there means the
 * number is stored new-style in i_addr[1].
 */
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	if (!S_ISCHR(inode->i_mode) && !S_ISBLK(inode->i_mode) &&
	    !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))
		return;

	if (ri->i_addr[0])
		inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
	else
		inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
}
/*
 * do_read_inode - read the on-disk f2fs inode for @inode and populate
 * both the VFS inode and the f2fs-private inode info.
 *
 * Returns 0 on success, -EINVAL for an out-of-range inode number, or
 * the error from reading the node page.
 */
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
			 (unsigned long) inode->i_ino);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	rn = F2FS_NODE(node_page);
	ri = &(rn->i);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	inode->i_uid = le32_to_cpu(ri->i_uid);
	inode->i_gid = le32_to_cpu(ri->i_gid);
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	/*
	 * Special inodes store their device number in the first address
	 * slot (old 16-bit encoding) or, if that is zero, new-style in
	 * the second slot.
	 */
	if (ri->i_addr[0])
		inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
	else
		inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	get_extent_info(&fi->ext, ri->i_ext);
	get_inline_info(fi, ri);
	f2fs_put_page(node_page, 1);
	return 0;
}
/*
 * get_cramfs_inode - look up or create the inode for a raw cramfs inode
 * found at @offset within the image on @sb.
 *
 * Returns the inode (possibly from the inode cache) or ERR_PTR(-ENOMEM).
 */
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	/* Cached and already set up - return it as-is. */
	if (!(inode->i_state & I_NEW))
		return inode;

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		/* Special files keep their device number in the size field. */
		init_special_inode(inode, cramfs_inode->mode,
				   old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	inode->i_uid = cramfs_inode->uid;
	inode->i_gid = cramfs_inode->gid;

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/*
	 * inode->i_nlink is left 1 - arguably wrong for directories,
	 * but it's the best we can do without reading the directory
	 * contents.  1 yields the right result in GNU find, even
	 * without -noleaf option.
	 */
	unlock_new_inode(inode);
	return inode;
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) { int extra_size = get_extra_isize(inode); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { if (ri->i_addr[extra_size]) inode->i_rdev = old_decode_dev( le32_to_cpu(ri->i_addr[extra_size])); else inode->i_rdev = new_decode_dev( le32_to_cpu(ri->i_addr[extra_size + 1])); } }
/**
 * vxfs_iget - get an inode
 * @sbp:	the superblock to get the inode for
 * @ino:	the number of the inode to get
 *
 * Description:
 *   vxfs_read_inode creates an inode, reads the disk inode for @ino and fills
 *   in all relevant fields in the new inode.  Returns the inode, or an
 *   ERR_PTR on allocation or read failure.
 */
struct inode *
vxfs_iget(struct super_block *sbp, ino_t ino)
{
	struct vxfs_inode_info *vip;
	const struct address_space_operations *aops;
	struct inode *ip;

	ip = iget_locked(sbp, ino);
	if (!ip)
		return ERR_PTR(-ENOMEM);
	/* Cached inode - already initialised. */
	if (!(ip->i_state & I_NEW))
		return ip;

	vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist);
	if (IS_ERR(vip)) {
		iget_failed(ip);
		return ERR_CAST(vip);
	}

	vxfs_iinit(ip, vip);

	/* Immediate-style inodes keep their data inside the inode itself. */
	if (VXFS_ISIMMED(vip))
		aops = &vxfs_immed_aops;
	else
		aops = &vxfs_aops;

	if (S_ISREG(ip->i_mode)) {
		ip->i_fop = &generic_ro_fops;
		ip->i_mapping->a_ops = aops;
	} else if (S_ISDIR(ip->i_mode)) {
		ip->i_op = &vxfs_dir_inode_ops;
		ip->i_fop = &vxfs_dir_operations;
		ip->i_mapping->a_ops = aops;
	} else if (S_ISLNK(ip->i_mode)) {
		if (!VXFS_ISIMMED(vip)) {
			ip->i_op = &page_symlink_inode_operations;
			inode_nohighmem(ip);
			ip->i_mapping->a_ops = &vxfs_aops;
		} else {
			/*
			 * Inline symlink: point directly at the immediate
			 * data and make sure it is NUL terminated.
			 */
			ip->i_op = &simple_symlink_inode_operations;
			ip->i_link = vip->vii_immed.vi_immed;
			nd_terminate_link(ip->i_link, ip->i_size,
					  sizeof(vip->vii_immed.vi_immed) - 1);
		}
	} else
		init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));

	unlock_new_inode(ip);
	return ip;
}
/*
 * setup_arch - early m32r architecture setup.
 *
 * Records the boot-time root device and CPU clocking, configures
 * optional ramdisk parameters, console and memory nodes, fills in the
 * kernel section resources, then parses the command line and brings up
 * the memory and paging subsystems.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Boot loader passes the root device in old 16-bit encoding. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	boot_cpu_data.cpu_clock = M32R_CPUCLK;
	boot_cpu_data.bus_clock = M32R_BUSCLK;
	boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#ifdef CONFIG_DISCONTIGMEM
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set_online(1);
#endif /* CONFIG_DISCONTIGMEM */

	/* Record the kernel's own section boundaries. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	parse_mem_cmdline(cmdline_p);

	setup_memory();

	paging_init();
}
static void get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) { if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { __le32 *rdev; if (is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR)) rdev = ri->i_nid; else rdev = ri->i_addr; if (rdev[0]) inode->i_rdev = old_decode_dev(le32_to_cpu(rdev[0])); else inode->i_rdev = new_decode_dev(le32_to_cpu(rdev[1])); } }
/*
 * get_cramfs_inode - look up or create the inode matching @cramfs_inode.
 *
 * Uses iget5 with cramfs-specific test/set callbacks because many
 * cramfs objects share inode #1.  Returns the inode or NULL.
 */
static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inode * cramfs_inode)
{
	struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
					   cramfs_iget5_test, cramfs_iget5_set,
					   cramfs_inode);
	static struct timespec zerotime;

	if (inode && (inode->i_state & I_NEW)) {
		inode->i_mode = cramfs_inode->mode;
		inode->i_uid = cramfs_inode->uid;
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_gid = cramfs_inode->gid;
		/* Struct copy intentional */
		inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
		inode->i_ino = CRAMINO(cramfs_inode);
		/*
		 * inode->i_nlink is left 1 - arguably wrong for directories,
		 * but it's the best we can do without reading the directory
		 * contents.  1 yields the right result in GNU find, even
		 * without -noleaf option.
		 */
		if (S_ISREG(inode->i_mode)) {
			inode->i_fop = &generic_ro_fops;
			inode->i_data.a_ops = &cramfs_aops;
		} else if (S_ISDIR(inode->i_mode)) {
			inode->i_op = &cramfs_dir_inode_operations;
			inode->i_fop = &cramfs_directory_operations;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_data.a_ops = &cramfs_aops;
		} else {
			/* Special files keep their device number in size. */
			inode->i_size = 0;
			inode->i_blocks = 0;
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(cramfs_inode->size));
		}
		unlock_new_inode(inode);
	}
	return inode;
}
/*
 * iget5_locked() "test" callback: decide whether the cached @inode is
 * the same object as the raw cramfs inode in @opaque.  Returns 1 on a
 * match, 0 otherwise.
 */
static int cramfs_iget5_test(struct inode *inode, void *opaque)
{
	struct cramfs_inode *raw = opaque;

	if (inode->i_ino != CRAMINO(raw))
		return 0;	/* does not match */

	if (inode->i_ino != 1)
		return 1;

	/* all empty directories, char, block, pipe, and sock, share inode #1 */
	if (inode->i_mode != raw->mode ||
	    inode->i_gid != raw->gid ||
	    inode->i_uid != raw->uid)
		return 0;	/* does not match */

	/* Device nodes must also agree on the stored device number. */
	if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
	    inode->i_rdev != old_decode_dev(raw->size))
		return 0;	/* does not match */

	return 1;		/* matches */
}
/**
 * vxfs_read_inode - fill in inode information
 * @ip:		inode pointer to fill
 *
 * Description:
 *   vxfs_read_inode reads the disk inode for @ip and fills
 *   in all relevant fields in @ip.  On failure the inode is
 *   left untouched.
 */
void
vxfs_read_inode(struct inode *ip)
{
	struct super_block *sbp = ip->i_sb;
	struct vxfs_inode_info *vip;
	struct address_space_operations *aops;
	ino_t ino = ip->i_ino;

	if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist)))
		return;

	vxfs_iinit(ip, vip);

	/* Immediate-style inodes keep file data inside the inode itself. */
	if (VXFS_ISIMMED(vip))
		aops = &vxfs_immed_aops;
	else
		aops = &vxfs_aops;

	if (S_ISREG(ip->i_mode)) {
		ip->i_fop = &vxfs_file_operations;
		ip->i_mapping->a_ops = aops;
	} else if (S_ISDIR(ip->i_mode)) {
		ip->i_op = &vxfs_dir_inode_ops;
		ip->i_fop = &vxfs_dir_operations;
		ip->i_mapping->a_ops = aops;
	} else if (S_ISLNK(ip->i_mode)) {
		if (!VXFS_ISIMMED(vip)) {
			ip->i_op = &page_symlink_inode_operations;
			ip->i_mapping->a_ops = &vxfs_aops;
		} else
			ip->i_op = &vxfs_immed_symlink_iops;
	} else
		/* Device/fifo/socket: rdev is stored old-style on disk. */
		init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));

	return;
}
/*
 * snapshot_ioctl - control interface for the userspace hibernation
 * snapshot device.
 *
 * Validates the ioctl magic/number and CAP_SYS_ADMIN, takes pm_mutex
 * (non-blocking; -EBUSY if contended) and dispatches on @cmd: process
 * freezing, image creation/restore, swap page allocation and the
 * deprecated swap-file/PMOPS interfaces.  Returns 0 or a negative errno.
 */
static long snapshot_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t size;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		if (data->frozen)
			break;

		printk("Syncing filesystems ... ");
		sys_sync();
		printk("done.\n");

		error = usermodehelper_disable();
		if (error)
			break;

		error = freeze_processes();
		if (error) {
			/* Undo the partial freeze on failure. */
			thaw_processes();
			usermodehelper_enable();
		}
		if (!error)
			data->frozen = 1;
		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen || data->ready)
			break;
		thaw_processes();
		usermodehelper_enable();
		data->frozen = 0;
		break;

	case SNAPSHOT_CREATE_IMAGE:
	case SNAPSHOT_ATOMIC_SNAPSHOT:
		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
			error = -EPERM;
			break;
		}
		error = hibernation_snapshot(data->platform_support);
		if (!error)
			error = put_user(in_suspend, (int __user *)arg);
		if (!error)
			data->ready = 1;
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		error = hibernation_restore(data->platform_support);
		break;

	case SNAPSHOT_FREE:
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = 0;
		break;

	case SNAPSHOT_PREF_IMAGE_SIZE:
	case SNAPSHOT_SET_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_GET_IMAGE_SIZE:
		if (!data->ready) {
			error = -ENODATA;
			break;
		}
		size = snapshot_get_image_size();
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_AVAIL_SWAP_SIZE:
	case SNAPSHOT_AVAIL_SWAP:
		size = count_swap_pages(data->swap, 1);
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_ALLOC_SWAP_PAGE:
	case SNAPSHOT_GET_SWAP_PAGE:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		offset = alloc_swapdev_block(data->swap);
		if (offset) {
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (loff_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap);
		break;

	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
		if (!swsusp_swap_in_use()) {
			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			if (old_decode_dev(arg)) {
				data->swap = swap_type_of(old_decode_dev(arg),
							  0, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		} else {
			error = -EPERM;
		}
		break;

	case SNAPSHOT_S2RAM:
		if (!data->frozen) {
			error = -EPERM;
			break;
		}
		/*
		 * Tasks are frozen and the notifiers have been called with
		 * PM_HIBERNATION_PREPARE
		 */
		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
		break;

	case SNAPSHOT_PLATFORM_SUPPORT:
		data->platform_support = !!arg;
		break;

	case SNAPSHOT_POWER_OFF:
		if (data->platform_support)
			error = hibernation_platform_enter();
		break;

	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
		error = -EINVAL;

		switch (arg) {

		case PMOPS_PREPARE:
			data->platform_support = 1;
			error = 0;
			break;

		case PMOPS_ENTER:
			if (data->platform_support)
				error = hibernation_platform_enter();
			break;

		case PMOPS_FINISH:
			if (data->platform_support)
				error = 0;
			break;

		default:
			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

		}
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		if (swsusp_swap_in_use()) {
			error = -EPERM;
		} else {
			struct resume_swap_area swap_area;
			dev_t swdev;

			error = copy_from_user(&swap_area, (void __user *)arg,
					       sizeof(struct resume_swap_area));
			if (error) {
				error = -EFAULT;
				break;
			}

			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			swdev = old_decode_dev(swap_area.dev);
			if (swdev) {
				offset = swap_area.offset;
				data->swap = swap_type_of(swdev, offset, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		}
		break;

	default:
		error = -ENOTTY;

	}

	mutex_unlock(&pm_mutex);

	return error;
}
/*
 * setup_arch - early x86-64 boot-time architecture setup.
 *
 * Copies boot-loader data (root device, video/EDD info), sets up the
 * e820 memory map and kernel mappings, reserves boot-critical memory
 * ranges (page 0, SMP trampoline, ACPI, initrd), initialises paging,
 * ACPI and APIC, and registers the standard I/O and memory resources.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long low_mem_size;
	unsigned long kernel_end;

	/* Root device is passed by the boot loader in old 16-bit form. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	aux_device_present = AUX_DEVICE_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	/* Record the kernel's own section boundaries. */
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();

	check_efer();

	init_memory_mapping();

#ifdef CONFIG_DISCONTIGMEM
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init();
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       (unsigned long)(INITRD_START + INITRD_SIZE),
			       (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
	paging_init();

	check_ioapic();
#ifdef CONFIG_ACPI_BOOT
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Must do this after paging_init (due to reliance on fixmap, and thus
	 * the bootmem allocator) but before get_smp_config (to allow parsing
	 * of MADT).
	 */
	acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	/* Will likely break when you have unassigned resources with more
	   than 4GB memory and bridges that don't support more than 4GB.
	   Doing it properly would require to use pci_alloc_consistent
	   in this case. */
	low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
/*
 * snapshot_ioctl - control interface for the userspace hibernation
 * snapshot device (older pm_ops-based variant).
 *
 * Validates the ioctl magic/number and CAP_SYS_ADMIN, then dispatches
 * on @cmd: process freezing, image creation/restore, swap page
 * management, suspend-to-RAM and the deprecated PMOPS interface.
 * Locking of pm_mutex is done per-command.  Returns 0 or a negative
 * errno.
 */
static int snapshot_ioctl(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t avail;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		if (data->frozen)
			break;
		mutex_lock(&pm_mutex);
		if (freeze_processes()) {
			/* Undo the partial freeze on failure. */
			thaw_processes();
			error = -EBUSY;
		}
		mutex_unlock(&pm_mutex);
		if (!error)
			data->frozen = 1;
		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen || data->ready)
			break;
		mutex_lock(&pm_mutex);
		thaw_processes();
		mutex_unlock(&pm_mutex);
		data->frozen = 0;
		break;

	case SNAPSHOT_ATOMIC_SNAPSHOT:
		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
			error = -EPERM;
			break;
		}
		error = snapshot_suspend(data->platform_suspend);
		if (!error)
			error = put_user(in_suspend, (unsigned int __user *)arg);
		if (!error)
			data->ready = 1;
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		error = snapshot_restore(data->platform_suspend);
		break;

	case SNAPSHOT_FREE:
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = 0;
		break;

	case SNAPSHOT_SET_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_AVAIL_SWAP:
		avail = count_swap_pages(data->swap, 1);
		avail <<= PAGE_SHIFT;
		error = put_user(avail, (loff_t __user *)arg);
		break;

	case SNAPSHOT_GET_SWAP_PAGE:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		offset = alloc_swapdev_block(data->swap);
		if (offset) {
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (sector_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap);
		break;

	case SNAPSHOT_SET_SWAP_FILE:
		if (!swsusp_swap_in_use()) {
			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			if (old_decode_dev(arg)) {
				data->swap = swap_type_of(old_decode_dev(arg),
							  0, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		} else {
			error = -EPERM;
		}
		break;

	case SNAPSHOT_S2RAM:
		if (!pm_ops) {
			error = -ENOSYS;
			break;
		}

		if (!data->frozen) {
			error = -EPERM;
			break;
		}

		if (!mutex_trylock(&pm_mutex)) {
			error = -EBUSY;
			break;
		}

		if (pm_ops->prepare) {
			error = pm_ops->prepare(PM_SUSPEND_MEM);
			if (error)
				goto OutS3;
		}

		/* Put devices to sleep */
		suspend_console();
		error = device_suspend(PMSG_SUSPEND);
		if (error) {
			printk(KERN_ERR "Failed to suspend some devices.\n");
		} else {
			error = disable_nonboot_cpus();
			if (!error) {
				/* Enter S3, system is already frozen */
				suspend_enter(PM_SUSPEND_MEM);
				enable_nonboot_cpus();
			}
			/* Wake up devices */
			device_resume();
		}
		resume_console();
		if (pm_ops->finish)
			pm_ops->finish(PM_SUSPEND_MEM);

OutS3:
		mutex_unlock(&pm_mutex);
		break;

	case SNAPSHOT_PMOPS:
		error = -EINVAL;

		switch (arg) {

		case PMOPS_PREPARE:
			if (hibernation_ops) {
				data->platform_suspend = 1;
				error = 0;
			} else {
				error = -ENOSYS;
			}
			break;

		case PMOPS_ENTER:
			if (data->platform_suspend) {
				kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
				error = hibernation_ops->enter();
			}
			break;

		case PMOPS_FINISH:
			if (data->platform_suspend)
				error = 0;
			break;

		default:
			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

		}
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		if (swsusp_swap_in_use()) {
			error = -EPERM;
		} else {
			struct resume_swap_area swap_area;
			dev_t swdev;

			error = copy_from_user(&swap_area, (void __user *)arg,
					       sizeof(struct resume_swap_area));
			if (error) {
				error = -EFAULT;
				break;
			}

			/*
			 * User space encodes device types as two-byte values,
			 * so we need to recode them
			 */
			swdev = old_decode_dev(swap_area.dev);
			if (swdev) {
				offset = swap_area.offset;
				data->swap = swap_type_of(swdev, offset, NULL);
				if (data->swap < 0)
					error = -ENODEV;
			} else {
				data->swap = -1;
				error = -EINVAL;
			}
		}
		break;

	default:
		error = -ENOTTY;

	}

	return error;
}
/*
 * Use the given filehandle to look up the corresponding export and
 * dentry.  On success, the results are used to set fh_export and
 * fh_dentry.
 *
 * Handles both version-1 filehandles (with an fsid key, converting the
 * deprecated major/minor fsid type in place) and the old fixed-size
 * format, which encodes the device old-style.  Returns 0 or an
 * nfserr_* status.
 */
static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	struct knfsd_fh	*fh = &fhp->fh_handle;
	struct fid *fid = NULL, sfid;
	struct svc_export *exp;
	struct dentry *dentry;
	int fileid_type;
	int data_left = fh->fh_size/4;
	__be32 error;

	/* NFSv3+ can report a bad handle; v2 can only say "stale". */
	error = nfserr_stale;
	if (rqstp->rq_vers > 2)
		error = nfserr_badhandle;
	if (rqstp->rq_vers == 4 && fh->fh_size == 0)
		return nfserr_nofilehandle;

	if (fh->fh_version == 1) {
		int len;

		if (--data_left < 0)
			return error;
		if (fh->fh_auth_type != 0)
			return error;
		len = key_len(fh->fh_fsid_type) / 4;
		if (len == 0)
			return error;
		if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
			/* deprecated, convert to type 3 */
			len = key_len(FSID_ENCODE_DEV)/4;
			fh->fh_fsid_type = FSID_ENCODE_DEV;
			fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]), ntohl(fh->fh_fsid[1])));
			fh->fh_fsid[1] = fh->fh_fsid[2];
		}
		data_left -= len;
		if (data_left < 0)
			return error;
		exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_auth);
		fid = (struct fid *)(fh->fh_auth + len);
	} else {
		__u32 tfh[2];
		dev_t xdev;
		ino_t xino;

		if (fh->fh_size != NFS_FHSIZE)
			return error;
		/* assume old filehandle format */
		xdev = old_decode_dev(fh->ofh_xdev);
		xino = u32_to_ino_t(fh->ofh_xino);
		mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
		exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
	}

	error = nfserr_stale;
	if (PTR_ERR(exp) == -ENOENT)
		return error;

	if (IS_ERR(exp))
		return nfserrno(PTR_ERR(exp));

	error = nfsd_setuser_and_check_port(rqstp, exp);
	if (error)
		goto out;

	/*
	 * Look up the dentry using the NFS file handle.
	 */
	error = nfserr_stale;
	if (rqstp->rq_vers > 2)
		error = nfserr_badhandle;

	if (fh->fh_version != 1) {
		/* Synthesize a struct fid from the old-format fields. */
		sfid.i32.ino = fh->ofh_ino;
		sfid.i32.gen = fh->ofh_generation;
		sfid.i32.parent_ino = fh->ofh_dirino;
		fid = &sfid;
		data_left = 3;
		if (fh->ofh_dirino == 0)
			fileid_type = FILEID_INO32_GEN;
		else
			fileid_type = FILEID_INO32_GEN_PARENT;
	} else
		fileid_type = fh->fh_fileid_type;

	if (fileid_type == FILEID_ROOT)
		dentry = dget(exp->ex_path.dentry);
	else {
		dentry = exportfs_decode_fh(exp->ex_path.mnt, fid, data_left,
				fileid_type, nfsd_acceptable, exp);
	}
	if (dentry == NULL)
		goto out;
	if (IS_ERR(dentry)) {
		if (PTR_ERR(dentry) != -EINVAL)
			error = nfserrno(PTR_ERR(dentry));
		goto out;
	}

	if (S_ISDIR(dentry->d_inode->i_mode) &&
			(dentry->d_flags & DCACHE_DISCONNECTED)) {
		printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
				dentry->d_parent->d_name.name, dentry->d_name.name);
	}

	fhp->fh_dentry = dentry;
	fhp->fh_export = exp;
	nfsd_nr_verified++;
	return 0;
out:
	exp_put(exp);
	return error;
}
struct inode *efs_iget(struct super_block *super, unsigned long ino) { int i, inode_index; dev_t device; u32 rdev; struct buffer_head *bh; struct efs_sb_info *sb = SUPER_INFO(super); struct efs_inode_info *in; efs_block_t block, offset; struct efs_dinode *efs_inode; struct inode *inode; inode = iget_locked(super, ino); if (IS_ERR(inode)) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; in = INODE_INFO(inode); /* ** EFS layout: ** ** | cylinder group | cylinder group | cylinder group ..etc ** |inodes|data |inodes|data |inodes|data ..etc ** ** work out the inode block index, (considering initially that the ** inodes are stored as consecutive blocks). then work out the block ** number of that inode given the above layout, and finally the ** offset of the inode within that block. */ inode_index = inode->i_ino / (EFS_BLOCKSIZE / sizeof(struct efs_dinode)); block = sb->fs_start + sb->first_block + (sb->group_size * (inode_index / sb->inode_blocks)) + (inode_index % sb->inode_blocks); offset = (inode->i_ino % (EFS_BLOCKSIZE / sizeof(struct efs_dinode))) * sizeof(struct efs_dinode); bh = sb_bread(inode->i_sb, block); if (!bh) { printk(KERN_WARNING "EFS: bread() failed at block %d\n", block); goto read_inode_error; } efs_inode = (struct efs_dinode *) (bh->b_data + offset); inode->i_mode = be16_to_cpu(efs_inode->di_mode); inode->i_nlink = be16_to_cpu(efs_inode->di_nlink); inode->i_uid = (uid_t)be16_to_cpu(efs_inode->di_uid); inode->i_gid = (gid_t)be16_to_cpu(efs_inode->di_gid); inode->i_size = be32_to_cpu(efs_inode->di_size); inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime); inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime); inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime); inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0; /* this is the number of blocks in the file */ if (inode->i_size == 0) { inode->i_blocks = 0; } else { inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1; } rdev = 
be16_to_cpu(efs_inode->di_u.di_dev.odev); if (rdev == 0xffff) { rdev = be32_to_cpu(efs_inode->di_u.di_dev.ndev); if (sysv_major(rdev) > 0xfff) device = 0; else device = MKDEV(sysv_major(rdev), sysv_minor(rdev)); } else device = old_decode_dev(rdev); /* get the number of extents for this object */ in->numextents = be16_to_cpu(efs_inode->di_numextents); in->lastextent = 0; /* copy the extents contained within the inode to memory */ for(i = 0; i < EFS_DIRECTEXTENTS; i++) { extent_copy(&(efs_inode->di_u.di_extents[i]), &(in->extents[i])); if (i < in->numextents && in->extents[i].cooked.ex_magic != 0) { printk(KERN_WARNING "EFS: extent %d has bad magic number in inode %lu\n", i, inode->i_ino); brelse(bh); goto read_inode_error; } } brelse(bh); #ifdef DEBUG printk(KERN_DEBUG "EFS: efs_iget(): inode %lu, extents %d, mode %o\n", inode->i_ino, in->numextents, inode->i_mode); #endif switch (inode->i_mode & S_IFMT) { case S_IFDIR: inode->i_op = &efs_dir_inode_operations; inode->i_fop = &efs_dir_operations; break; case S_IFREG: inode->i_fop = &generic_ro_fops; inode->i_data.a_ops = &efs_aops; break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &efs_symlink_aops; break; case S_IFCHR: case S_IFBLK: case S_IFIFO: init_special_inode(inode, inode->i_mode, device); break; default: printk(KERN_WARNING "EFS: unsupported inode mode %o\n", inode->i_mode); goto read_inode_error; break; } unlock_new_inode(inode); return inode; read_inode_error: printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino); iget_failed(inode); return ERR_PTR(-EIO); }
/*
 * setup_arch - architecture-specific boot-time initialisation (sh,
 * bootmem variant).
 *
 * Decodes the boot parameters, records the kernel image layout in
 * init_mm and the code/data resources, then sets up the bootmem
 * allocator, reserves the kernel image, bitmap, page 0 and (if
 * configured) the initrd, and finally hands off to platform setup and
 * paging_init().  Statement order here mirrors the boot sequence and
 * must not be changed casually.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);
	enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
	/* Built-in command line overrides whatever the loader passed. */
	strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	/* Root device comes from the loader in old 16-bit dev_t encoding. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	/* Record the kernel image layout for the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_bus(_text);
	code_resource.end = virt_to_bus(_etext)-1;
	data_resource.start = virt_to_bus(_etext);
	data_resource.end = virt_to_bus(_edata)-1;

	sh_mv_setup(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
		__MEMORY_START>>PAGE_SHIFT, max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn), PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	/* A linked-in ramdisk image (between __rd_start/__rd_end) takes
	 * the place of a loader-provided one. */
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
			initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	platform_setup();

	paging_init();
}
static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object * obj) { if (inode && obj) { __u32 mode = obj->yst_mode; switch( obj->variantType ){ case YAFFS_OBJECT_TYPE_FILE : if( ! S_ISREG(mode) ){ obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFREG; } break; case YAFFS_OBJECT_TYPE_SYMLINK : if( ! S_ISLNK(mode) ){ obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFLNK; } break; case YAFFS_OBJECT_TYPE_DIRECTORY : if( ! S_ISDIR(mode) ){ obj->yst_mode &= ~S_IFMT; obj->yst_mode |= S_IFDIR; } break; case YAFFS_OBJECT_TYPE_UNKNOWN : case YAFFS_OBJECT_TYPE_HARDLINK : case YAFFS_OBJECT_TYPE_SPECIAL : default: break; } inode->i_ino = obj->objectId; inode->i_mode = obj->yst_mode; inode->i_uid = obj->yst_uid; inode->i_gid = obj->yst_gid; inode->i_blksize = inode->i_sb->s_blocksize; #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) inode->i_rdev = old_decode_dev(obj->yst_rdev); inode->i_atime.tv_sec = (time_t) (obj->yst_atime); inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_sec = (time_t) obj->yst_mtime; inode->i_mtime.tv_nsec = 0; inode->i_ctime.tv_sec = (time_t) obj->yst_ctime; inode->i_ctime.tv_nsec = 0; #else inode->i_rdev = obj->yst_rdev; inode->i_atime = obj->yst_atime; inode->i_mtime = obj->yst_mtime; inode->i_ctime = obj->yst_ctime; #endif inode->i_size = yaffs_GetObjectFileLength(obj); inode->i_blocks = (inode->i_size + 511) >> 9; inode->i_nlink = yaffs_GetObjectLinkCount(obj); T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_FillInode mode %x uid %d gid %d size %d count %d\n", inode->i_mode, inode->i_uid, inode->i_gid, (int)inode->i_size, atomic_read(&inode->i_count))); switch (obj->yst_mode & S_IFMT) { default: #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) init_special_inode(inode, obj->yst_mode, old_decode_dev(obj->yst_rdev)); #else init_special_inode(inode, obj->yst_mode, (dev_t) (obj->yst_rdev)); #endif break; case S_IFREG: inode->i_op = &yaffs_file_inode_operations; inode->i_fop = &yaffs_file_operations; inode->i_mapping->a_ops = &yaffs_file_address_operations; 
break; case S_IFDIR: inode->i_op = &yaffs_dir_inode_operations; inode->i_fop = &yaffs_dir_operations; break; case S_IFLNK: inode->i_op = &yaffs_symlink_inode_operations; break; } inode->u.generic_ip = obj; obj->myInode = inode; } else {
static int snapshot_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int error = 0; struct snapshot_data *data; loff_t offset, avail; if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) return -ENOTTY; if (!capable(CAP_SYS_ADMIN)) return -EPERM; data = filp->private_data; switch (cmd) { case SNAPSHOT_FREEZE: if (data->frozen) break; down(&pm_sem); disable_nonboot_cpus(); if (freeze_processes()) { thaw_processes(); enable_nonboot_cpus(); error = -EBUSY; } up(&pm_sem); if (!error) data->frozen = 1; break; case SNAPSHOT_UNFREEZE: if (!data->frozen) break; down(&pm_sem); thaw_processes(); enable_nonboot_cpus(); up(&pm_sem); data->frozen = 0; break; case SNAPSHOT_ATOMIC_SNAPSHOT: if (data->mode != O_RDONLY || !data->frozen || data->ready) { error = -EPERM; break; } down(&pm_sem); /* Free memory before shutting down devices. */ error = swsusp_shrink_memory(); if (!error) { error = device_suspend(PMSG_FREEZE); if (!error) { in_suspend = 1; error = swsusp_suspend(); device_resume(); } } up(&pm_sem); if (!error) error = put_user(in_suspend, (unsigned int __user *)arg); if (!error) data->ready = 1; break; case SNAPSHOT_ATOMIC_RESTORE: if (data->mode != O_WRONLY || !data->frozen || !snapshot_image_loaded(&data->handle)) { error = -EPERM; break; } down(&pm_sem); pm_prepare_console(); error = device_suspend(PMSG_FREEZE); if (!error) { error = swsusp_resume(); device_resume(); } pm_restore_console(); up(&pm_sem); break; case SNAPSHOT_FREE: swsusp_free(); memset(&data->handle, 0, sizeof(struct snapshot_handle)); data->ready = 0; break; case SNAPSHOT_SET_IMAGE_SIZE: image_size = arg; break; case SNAPSHOT_AVAIL_SWAP: avail = count_swap_pages(data->swap, 1); avail <<= PAGE_SHIFT; error = put_user(avail, (loff_t __user *)arg); break; case SNAPSHOT_GET_SWAP_PAGE: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } if (!data->bitmap) { data->bitmap = 
alloc_bitmap(count_swap_pages(data->swap, 0)); if (!data->bitmap) { error = -ENOMEM; break; } } offset = alloc_swap_page(data->swap, data->bitmap); if (offset) { offset <<= PAGE_SHIFT; error = put_user(offset, (loff_t __user *)arg); } else { error = -ENOSPC; } break; case SNAPSHOT_FREE_SWAP_PAGES: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } free_all_swap_pages(data->swap, data->bitmap); free_bitmap(data->bitmap); data->bitmap = NULL; break; case SNAPSHOT_SET_SWAP_FILE: if (!data->bitmap) { /* * User space encodes device types as two-byte values, * so we need to recode them */ if (old_decode_dev(arg)) { data->swap = swap_type_of(old_decode_dev(arg)); if (data->swap < 0) error = -ENODEV; } else { data->swap = -1; error = -EINVAL; } } else { error = -EPERM; } break; case SNAPSHOT_S2RAM: if (!data->frozen) { error = -EPERM; break; } if (down_trylock(&pm_sem)) { error = -EBUSY; break; } if (pm_ops->prepare) { error = pm_ops->prepare(PM_SUSPEND_MEM); if (error) goto OutS3; } /* Put devices to sleep */ error = device_suspend(PMSG_SUSPEND); if (error) { printk(KERN_ERR "Failed to suspend some devices.\n"); } else { /* Enter S3, system is already frozen */ suspend_enter(PM_SUSPEND_MEM); /* Wake up devices */ device_resume(); } if (pm_ops->finish) pm_ops->finish(PM_SUSPEND_MEM); OutS3: up(&pm_sem); break; default: error = -ENOTTY; } return error; }
/*
 * squashfs_read_inode_2 - read a squashfs 2.x on-disk inode and fill
 * in the VFS inode @i.
 *
 * @inode encodes the metadata block and offset of the on-disk inode
 * (SQUASHFS_INODE_BLK / SQUASHFS_INODE_OFFSET).  The base header is
 * read first to learn the type, then the type-specific header is
 * re-read from the same position.  When msblk->swap is set the on-disk
 * data is opposite-endian and each header is byte-swapped via the
 * SQUASHFS_SWAP_* macros.
 *
 * Returns 1 on success, 0 on failure.
 */
static int squashfs_read_inode_2(struct inode *i, squashfs_inode_t inode)
{
	struct super_block *s = i->i_sb;
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	unsigned int block = SQUASHFS_INODE_BLK(inode) + sblk->inode_table_start;
	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
	unsigned int ino = i->i_ino;
	long long next_block;		/* position just past the header read */
	unsigned int next_offset;
	union squashfs_inode_header_2 id, sid;	/* sid: raw (unswapped) copy */
	struct squashfs_base_inode_header_2 *inodeb = &id.base, *sinodeb = &sid.base;

	TRACE("Entered squashfs_iget\n");

	/* Read the common base header to determine the inode type. */
	if (msblk->swap) {
		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
					offset, sizeof(*sinodeb), &next_block,
					&next_offset))
			goto failed_read;
		SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb, sizeof(*sinodeb));
	} else if (!squashfs_get_cached_block(s, (char *) inodeb, block,
					offset, sizeof(*inodeb), &next_block,
					&next_offset))
		goto failed_read;

	squashfs_new_inode(msblk, i, inodeb, ino);

	switch(inodeb->inode_type) {
		case SQUASHFS_FILE_TYPE: {
			struct squashfs_reg_inode_header_2 *inodep = &id.reg;
			struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
			long long frag_blk;
			unsigned int frag_size = 0;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, (char *)
						sinodep, block, offset,
						sizeof(*sinodep), &next_block,
						&next_offset))
					goto failed_read;
				SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
			} else if (!squashfs_get_cached_block(s, (char *)
						inodep, block, offset,
						sizeof(*inodep), &next_block,
						&next_offset))
				goto failed_read;

			/* Resolve the tail-end fragment, if any. */
			frag_blk = SQUASHFS_INVALID_BLK;
			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
					!get_fragment_location_2(s,
					inodep->fragment, &frag_blk, &frag_size))
				goto failed_read;

			i->i_size = inodep->file_size;
			i->i_fop = &generic_ro_fops;
			i->i_mode |= S_IFREG;
			i->i_mtime.tv_sec = inodep->mtime;
			i->i_atime.tv_sec = inodep->mtime;
			i->i_ctime.tv_sec = inodep->mtime;
			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			/* Block list immediately follows the inode header. */
			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
			SQUASHFS_I(i)->offset = next_offset;
			if (sblk->block_size > 4096)
				i->i_data.a_ops = &squashfs_aops;
			else
				i->i_data.a_ops = &squashfs_aops_4K;

			TRACE("File inode %x:%x, start_block %x, "
					"block_list_start %llx, offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->start_block, next_block,
					next_offset);
			break;
		}
		case SQUASHFS_DIR_TYPE: {
			struct squashfs_dir_inode_header_2 *inodep = &id.dir;
			struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, (char *)
						sinodep, block, offset,
						sizeof(*sinodep), &next_block,
						&next_offset))
					goto failed_read;
				SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
			} else if (!squashfs_get_cached_block(s, (char *)
						inodep, block, offset,
						sizeof(*inodep), &next_block,
						&next_offset))
				goto failed_read;

			i->i_size = inodep->file_size;
			i->i_op = &squashfs_dir_inode_ops_2;
			i->i_fop = &squashfs_dir_ops_2;
			i->i_mode |= S_IFDIR;
			i->i_mtime.tv_sec = inodep->mtime;
			i->i_atime.tv_sec = inodep->mtime;
			i->i_ctime.tv_sec = inodep->mtime;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			SQUASHFS_I(i)->offset = inodep->offset;
			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
			SQUASHFS_I(i)->u.s2.parent_inode = 0;

			TRACE("Directory inode %x:%x, start_block %x, offset "
					"%x\n", SQUASHFS_INODE_BLK(inode),
					offset, inodep->start_block,
					inodep->offset);
			break;
		}
		case SQUASHFS_LDIR_TYPE: {
			/* "Long" directory: carries an index for fast lookup. */
			struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
			struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, (char *)
						sinodep, block, offset,
						sizeof(*sinodep), &next_block,
						&next_offset))
					goto failed_read;
				SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep, sinodep);
			} else if (!squashfs_get_cached_block(s, (char *)
						inodep, block, offset,
						sizeof(*inodep), &next_block,
						&next_offset))
				goto failed_read;

			i->i_size = inodep->file_size;
			i->i_op = &squashfs_dir_inode_ops_2;
			i->i_fop = &squashfs_dir_ops_2;
			i->i_mode |= S_IFDIR;
			i->i_mtime.tv_sec = inodep->mtime;
			i->i_atime.tv_sec = inodep->mtime;
			i->i_ctime.tv_sec = inodep->mtime;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			SQUASHFS_I(i)->offset = inodep->offset;
			/* The directory index follows the inode header. */
			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
			SQUASHFS_I(i)->u.s2.directory_index_offset = next_offset;
			SQUASHFS_I(i)->u.s2.directory_index_count = inodep->i_count;
			SQUASHFS_I(i)->u.s2.parent_inode = 0;

			TRACE("Long directory inode %x:%x, start_block %x, "
					"offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->start_block, inodep->offset);
			break;
		}
		case SQUASHFS_SYMLINK_TYPE: {
			struct squashfs_symlink_inode_header_2 *inodep = &id.symlink;
			struct squashfs_symlink_inode_header_2 *sinodep = &sid.symlink;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, (char *)
						sinodep, block, offset,
						sizeof(*sinodep), &next_block,
						&next_offset))
					goto failed_read;
				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep, sinodep);
			} else if (!squashfs_get_cached_block(s, (char *)
						inodep, block, offset,
						sizeof(*inodep), &next_block,
						&next_offset))
				goto failed_read;

			i->i_size = inodep->symlink_size;
			i->i_op = &page_symlink_inode_operations;
			i->i_data.a_ops = &squashfs_symlink_aops;
			i->i_mode |= S_IFLNK;
			/* The symlink target string follows the header. */
			SQUASHFS_I(i)->start_block = next_block;
			SQUASHFS_I(i)->offset = next_offset;

			TRACE("Symbolic link inode %x:%x, start_block %llx, "
					"offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					next_block, next_offset);
			break;
		}
		case SQUASHFS_BLKDEV_TYPE:
		case SQUASHFS_CHRDEV_TYPE: {
			struct squashfs_dev_inode_header_2 *inodep = &id.dev;
			struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, (char *)
						sinodep, block, offset,
						sizeof(*sinodep), &next_block,
						&next_offset))
					goto failed_read;
				SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
			} else if (!squashfs_get_cached_block(s, (char *)
						inodep, block, offset,
						sizeof(*inodep), &next_block,
						&next_offset))
				goto failed_read;

			i->i_mode |= (inodeb->inode_type ==
					SQUASHFS_CHRDEV_TYPE) ? S_IFCHR : S_IFBLK;
			/* On-disk rdev uses the old 16-bit encoding. */
			init_special_inode(i, i->i_mode,
					old_decode_dev(inodep->rdev));

			TRACE("Device inode %x:%x, rdev %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->rdev);
			break;
		}
		case SQUASHFS_FIFO_TYPE:
		case SQUASHFS_SOCKET_TYPE: {
			/* FIFOs/sockets carry no device number. */
			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
					? S_IFIFO : S_IFSOCK;
			init_special_inode(i, i->i_mode, 0);
			break;
		}
		default:
			ERROR("Unknown inode type %d in squashfs_iget!\n",
					inodeb->inode_type);
			goto failed_read1;
	}

	return 1;

failed_read:
	ERROR("Unable to read inode [%x:%x]\n", block, offset);

failed_read1:
	return 0;
}
void jffs2_read_inode (struct inode *inode) { struct jffs2_inode_info *f; struct jffs2_sb_info *c; struct jffs2_raw_inode latest_node; int ret; D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); f = JFFS2_INODE_INFO(inode); c = JFFS2_SB_INFO(inode->i_sb); jffs2_init_inode_info(f); down(&f->sem); ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); if (ret) { make_bad_inode(inode); up(&f->sem); return; } inode->i_mode = jemode_to_cpu(latest_node.mode); inode->i_uid = je16_to_cpu(latest_node.uid); inode->i_gid = je16_to_cpu(latest_node.gid); inode->i_size = je32_to_cpu(latest_node.isize); inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime)); inode->i_nlink = f->inocache->nlink; inode->i_blksize = PAGE_SIZE; inode->i_blocks = (inode->i_size + 511) >> 9; switch (inode->i_mode & S_IFMT) { jint16_t rdev; case S_IFLNK: inode->i_op = &jffs2_symlink_inode_operations; break; case S_IFDIR: { struct jffs2_full_dirent *fd; for (fd=f->dents; fd; fd = fd->next) { if (fd->type == DT_DIR && fd->ino) inode->i_nlink++; } /* and '..' 
*/ inode->i_nlink++; /* Root dir gets i_nlink 3 for some reason */ if (inode->i_ino == 1) inode->i_nlink++; inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; break; } case S_IFREG: inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; inode->i_mapping->nrpages = 0; break; case S_IFBLK: case S_IFCHR: /* Read the device numbers from the media */ D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) { /* Eep */ printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); up(&f->sem); jffs2_do_clear_inode(c, f); make_bad_inode(inode); return; } case S_IFSOCK: case S_IFIFO: inode->i_op = &jffs2_file_inode_operations; init_special_inode(inode, inode->i_mode, old_decode_dev((je16_to_cpu(rdev)))); break; default: printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino); } up(&f->sem); D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); }
/*
 * Perform sanity checks on the dentry in a client's file handle.
 *
 * Note that the file handle dentry may need to be freed even after
 * an error return.
 *
 * This is only called at the start of an nfsproc call, so fhp points to
 * a svc_fh which is all 0 except for the over-the-wire file handle.
 *
 * On return, an export reference is held by fhp (on success) and the
 * temporary reference taken here is dropped at 'out'.
 */
__be32 fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
{
	struct knfsd_fh *fh = &fhp->fh_handle;
	struct svc_export *exp = NULL;
	struct dentry *dentry;
	__be32 error = 0;

	dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));

	if (!fhp->fh_dentry) {
		__u32 *datap=NULL;
		__u32 tfh[3]; /* filehandle fragment for oldstyle filehandles */
		int fileid_type;
		int data_left = fh->fh_size/4;	/* payload remaining, 32-bit words */

		error = nfserr_stale;
		if (rqstp->rq_client == NULL)
			goto out;
		/* v3+ can report a malformed handle distinctly. */
		if (rqstp->rq_vers > 2)
			error = nfserr_badhandle;
		if (rqstp->rq_vers == 4 && fh->fh_size == 0)
			return nfserr_nofilehandle;

		if (fh->fh_version == 1) {
			int len;
			datap = fh->fh_auth;
			if (--data_left<0) goto out;
			/* Only auth type 0 (no auth data) is supported. */
			switch (fh->fh_auth_type) {
			case 0: break;
			default: goto out;
			}
			len = key_len(fh->fh_fsid_type) / 4;
			if (len == 0) goto out;
			if (fh->fh_fsid_type == FSID_MAJOR_MINOR) {
				/* deprecated, convert to type 3 */
				len = key_len(FSID_ENCODE_DEV)/4;
				fh->fh_fsid_type = FSID_ENCODE_DEV;
				fh->fh_fsid[0] = new_encode_dev(MKDEV(ntohl(fh->fh_fsid[0]),
								ntohl(fh->fh_fsid[1])));
				fh->fh_fsid[1] = fh->fh_fsid[2];
			}
			if ((data_left -= len)<0) goto out;
			exp = exp_find(rqstp->rq_client, fh->fh_fsid_type,
					datap, &rqstp->rq_chandle);
			datap += len;	/* datap now points at the file id */
		} else {
			dev_t xdev;
			ino_t xino;
			if (fh->fh_size != NFS_FHSIZE) goto out;
			/* assume old filehandle format */
			xdev = old_decode_dev(fh->ofh_xdev);
			xino = u32_to_ino_t(fh->ofh_xino);
			mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
			exp = exp_find(rqstp->rq_client, FSID_DEV, tfh,
					&rqstp->rq_chandle);
		}

		/* An upcall is pending (-EAGAIN/-ETIMEDOUT): report it so
		 * the client retries, rather than returning stale. */
		if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN
				|| PTR_ERR(exp) == -ETIMEDOUT)) {
			error = nfserrno(PTR_ERR(exp));
			goto out;
		}

		error = nfserr_stale;
		if (!exp || IS_ERR(exp))
			goto out;

		/* Check if the request originated from a secure port. */
		error = nfserr_perm;
		if (!rqstp->rq_secure && EX_SECURE(exp)) {
			char buf[RPC_MAX_ADDRBUFLEN];
			printk(KERN_WARNING
			       "nfsd: request from insecure port %s!\n",
			       svc_print_addr(rqstp, buf, sizeof(buf)));
			goto out;
		}

		/* Set user creds for this exportpoint */
		error = nfserrno(nfsd_setuser(rqstp, exp));
		if (error)
			goto out;

		/*
		 * Look up the dentry using the NFS file handle.
		 */
		error = nfserr_stale;
		if (rqstp->rq_vers > 2)
			error = nfserr_badhandle;

		if (fh->fh_version != 1) {
			/* Old-style handle: pack ino/gen/dirino into tfh. */
			tfh[0] = fh->ofh_ino;
			tfh[1] = fh->ofh_generation;
			tfh[2] = fh->ofh_dirino;
			datap = tfh;
			data_left = 3;
			if (fh->ofh_dirino == 0)
				fileid_type = 1;	/* ino + generation */
			else
				fileid_type = 2;	/* ino + gen + parent ino */
		} else
			fileid_type = fh->fh_fileid_type;

		if (fileid_type == 0)
			/* Type 0 names the export root itself. */
			dentry = dget(exp->ex_dentry);
		else {
			struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
			dentry = CALL(nop,decode_fh)(exp->ex_mnt->mnt_sb,
					datap, data_left, fileid_type,
					nfsd_acceptable, exp);
		}

		if (dentry == NULL)
			goto out;
		if (IS_ERR(dentry)) {
			/* -EINVAL means "handle not recognised": leave the
			 * stale/badhandle error in place. */
			if (PTR_ERR(dentry) != -EINVAL)
				error = nfserrno(PTR_ERR(dentry));
			goto out;
		}

		if (S_ISDIR(dentry->d_inode->i_mode) &&
				(dentry->d_flags & DCACHE_DISCONNECTED)) {
			printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
					dentry->d_parent->d_name.name, dentry->d_name.name);
		}

		fhp->fh_dentry = dentry;
		fhp->fh_export = exp;
		nfsd_nr_verified++;
	} else {
		/* just rechecking permissions
		 * (e.g. nfsproc_create calls fh_verify, then nfsd_create does as well)
		 */
		dprintk("nfsd: fh_verify - just checking\n");
		dentry = fhp->fh_dentry;
		exp = fhp->fh_export;
		/* Set user creds for this exportpoint; necessary even
		 * in the "just checking" case because this may be a
		 * filehandle that was created by fh_compose, and that
		 * is about to be used in another nfsv4 compound
		 * operation */
		error = nfserrno(nfsd_setuser(rqstp, exp));
		if (error)
			goto out;
	}

	/* Take a temporary reference; it is balanced by the exp_put()
	 * at 'out' on every path below. */
	cache_get(&exp->h);

	error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
	if (error)
		goto out;

	/* Finally, check access permissions. */
	error = nfsd_permission(exp, dentry, access);

	if (error) {
		dprintk("fh_verify: %s/%s permission failure, "
			"acc=%x, error=%d\n",
			dentry->d_parent->d_name.name,
			dentry->d_name.name,
			access, ntohl(error));
	}
out:
	if (exp && !IS_ERR(exp))
		exp_put(exp);
	if (error == nfserr_stale)
		nfsdstats.fh_stale++;
	return error;
}
/*
 * setup_arch - architecture-specific boot-time initialisation (sh,
 * memory-setup-helper variant).
 *
 * Enables the MMU, decodes loader parameters (root device, ramdisk
 * flags, command line), records the kernel image layout, then sets up
 * memory via setup_memory()/sparse_init() and hands off to the machine
 * vector and paging_init().  The ordering reflects boot dependencies
 * and should not be changed casually.
 */
void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

	/* Root device comes from the loader in old 16-bit dev_t encoding. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	/* Record the kernel image layout for the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
	memory_end = memory_start + __MEMORY_SIZE;

#ifdef CONFIG_CMDLINE_BOOL
	/* Built-in command line overrides the loader-provided one. */
	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	sh_mv_setup();

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	/* Setup bootmem with available RAM */
	setup_memory();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);

	paging_init();
}
/*
 * setup_arch - x86_64 boot-time architecture initialisation (Xen-capable).
 *
 * Called once from start_kernel().  Two large configurations are folded
 * into one function with #ifdef CONFIG_XEN:
 *   - Xen guest: register a panic notifier, fake the video/screen info,
 *     and (for non-auto-translated guests) rebuild the phys->machine (p2m)
 *     translation table plus the frame lists the hypervisor needs for
 *     save/restore.
 *   - Bare metal: decode boot-loader parameters and reserve the classic
 *     PC regions (page 0, EBDA, SMP trampoline).
 * Both paths then share e820 parsing, bootmem reservations, initrd setup,
 * paging_init() and ACPI/APIC discovery.
 *
 * NOTE(review): ordering here is load-bearing (e820 before bootmem init,
 * reservations before first allocation, paging_init before the Xen p2m
 * rebuild uses alloc_bootmem); do not reorder without auditing callees.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	/* Scratch copies for fetching the *machine* (host) e820 from Xen. */
	struct e820entry *machine_e820;
	struct xen_memory_map memmap;
#endif

#ifdef CONFIG_XEN
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

	/* Under Xen the root device defaults to the ramdisk. */
	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
	kernel_end = 0;		/* dummy */
	screen_info = SCREEN_INFO;

	if (xen_start_info->flags & SIF_INITDOMAIN) {
		/* This is drawn from a dump from vgacon:startup in
		 * standard Linux. */
		screen_info.orig_video_mode = 3;
		screen_info.orig_video_isVGA = 1;
		screen_info.orig_video_lines = 25;
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_ega_bx = 3;
		screen_info.orig_video_points = 16;
	} else
		screen_info.orig_video_isVGA = 0;

	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	/* Unpack the boot loader's packed ramdisk flag word. */
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	setup_xen_features();

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);

	ARCH_SETUP
#else
	/* Bare metal: parameters come from the real-mode boot protocol. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif	/* !CONFIG_XEN */
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

	/* Record the kernel image layout (linker-script symbols) in init_mm. */
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

#ifndef CONFIG_XEN
	/* Physical-address resources for /proc/iomem; ends are inclusive. */
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
#endif

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

#ifndef CONFIG_XEN
	discover_ebda();
#endif

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_XEN
	/* reserve physmap, start info and initial page tables */
	reserve_bootmem(kernel_end, (table_start<<PAGE_SHIFT)-kernel_end);
#else
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	/* Xen passes the initrd location via the start-info page. */
	if (xen_start_info->mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
	/* Reserve the crash-kernel region requested on the command line. */
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j, k, fpp;
		unsigned long va;

		/* 'Initial mapping' of initrd must be destroyed. */
		for (va = xen_start_info->mod_start;
		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
		     va += PAGE_SIZE) {
			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Make sure we have a large enough P->M table.
			 */
			phys_to_machine_mapping = alloc_bootmem(
				end_pfn * sizeof(unsigned long));
			/* ~0 marks pfns with no machine frame (INVALID_P2M). */
			memset(phys_to_machine_mapping, ~0,
			       end_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info->mfn_list,
			       xen_start_info->nr_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info->mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						sizeof(unsigned long))));

			/* Destroyed 'initial mapping' of old p2m table. */
			for (va = xen_start_info->mfn_list;
			     va < (xen_start_info->mfn_list +
				   (xen_start_info->nr_pages*sizeof(unsigned long)));
			     va += PAGE_SIZE) {
				HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
			}

			/*
			 * Initialise the list of the frames that specify the
			 * list of frames that make up the p2m table. Used by
			 * save/restore.
			 */
			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
				virt_to_mfn(pfn_to_mfn_frame_list_list);

			/* fpp = p2m entries described per frame-list page. */
			fpp = PAGE_SIZE/sizeof(unsigned long);
			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
				if ((j % fpp) == 0) {
					k++;
					BUG_ON(k>=fpp);
					pfn_to_mfn_frame_list[k] =
						alloc_bootmem(PAGE_SIZE);
					pfn_to_mfn_frame_list_list[k] =
						virt_to_mfn(pfn_to_mfn_frame_list[k]);
					j=0;
				}
				pfn_to_mfn_frame_list[k][j] =
					virt_to_mfn(&phys_to_machine_mapping[i]);
			}
			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
		}
	}

	/* Only dom0 may touch the real DMI tables. */
	if (xen_start_info->flags & SIF_INITDOMAIN)
		dmi_scan_machine();

	/* Unprivileged guests get no ACPI: the tables belong to dom0. */
	if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
	{
		acpi_disabled = 1;
#ifdef  CONFIG_ACPI
		acpi_ht = 0;
#endif
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

	zap_low_mappings(0);

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
	prefill_possible_map();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	probe_roms();
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		/* dom0 registers the host's e820, fetched from the hypervisor. */
		machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);

		memmap.nr_entries = E820MAX;
		set_xen_guest_handle(memmap.buffer, machine_e820);

		BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));

		e820_reserve_resources(machine_e820, memmap.nr_entries);
	}
#elif !defined(CONFIG_XEN)
	probe_roms();
	e820_reserve_resources(e820.map, e820.nr_map);
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		e820_setup_gap(machine_e820, memmap.nr_entries);
		/* The machine e820 copy is only needed above; give it back. */
		free_bootmem(__pa(machine_e820), PAGE_SIZE);
	}
#elif !defined(CONFIG_XEN)
	e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_XEN
	{
		struct physdev_set_iopl set_iopl;

		/* Allow the guest kernel (ring 1) to use I/O instructions. */
		set_iopl.iopl = 1;
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

		if (xen_start_info->flags & SIF_INITDOMAIN) {
			if (!(xen_start_info->flags & SIF_PRIVILEGED))
				panic("Xen granted us console access "
				      "but not privileged status");

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
			/* domU has no VGA hardware; route console elsewhere. */
			extern int console_use_vt;
			console_use_vt = 0;
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
/*
 * setup_arch - x86_64 boot-time architecture initialisation (bare metal).
 *
 * Called once from start_kernel().  Decodes the boot-loader parameters,
 * builds the e820 memory map and direct mapping, makes the early bootmem
 * reservations (page tables, kernel image, page 0, EBDA, SMP trampoline,
 * initrd, crash kernel, iBFT), then performs ACPI/NUMA/APIC discovery and
 * registers the standard PC I/O and memory resources.
 *
 * NOTE(review): the sequence is strictly ordered — e820 must be final
 * before end_pfn is computed, all reservations must precede the first
 * bootmem allocation, and acpi_boot_table_init() must run before
 * acpi_numa_init() — do not reorder without auditing callees.
 */
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", saved_command_line);

	/* Root device number arrives from the boot loader in old 16-bit form. */
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	/* Unpack the boot loader's packed ramdisk flag word. */
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* "EFIL" signature left by the EFI boot stub/loader. */
	if (!strncmp(EFI_LOADER_SIG, "EFIL", 4))
		efi_enabled = 1;
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

	/* Record the kernel image layout (linker-script symbols) in init_mm. */
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;
	init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));

	/* Physical-address resources for /proc/iomem; ends are inclusive. */
	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext)-1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	finish_e820_parsing();

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	if (efi_enabled)
		efi_map_memmap();

	dmi_scan_machine();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor(&boot_cpu_data);

	/*
	 * init_hypervisor gets called more than one time throughout
	 * the life of the cpu, but that hurts kvm. We only need it
	 * once, so we do it explicitly here
	 */
	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_KVM)
		kvmclock_init();

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT,
				BOOTMEM_DEFAULT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text),
				BOOTMEM_DEFAULT);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE, BOOTMEM_DEFAULT);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE,
				BOOTMEM_DEFAULT);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE,
						BOOTMEM_DEFAULT);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	/*
	 * Reserve the crash-kernel region; BOOTMEM_EXCLUSIVE fails if the
	 * range is already in use, in which case kdump is disabled.
	 */
	if ((crashk_res.start < crashk_res.end) &&
	    (crashk_res.end <= (end_pfn << PAGE_SHIFT))) {
		if (reserve_bootmem_generic(crashk_res.start,
					crashk_res.end - crashk_res.start + 1,
					BOOTMEM_EXCLUSIVE) < 0) {
			printk(KERN_ERR "crashkernel reservation failed - "
					"memory is in use\n");
			crashk_res.start = crashk_res.end = 0;
		}
	} else {
		printk(KERN_ERR "Memory for crash kernel (0x%lx to 0x%lx) not"
			"within permissible range\ndisabling kdump\n",
			crashk_res.start, crashk_res.end);
		crashk_res.end = 0;
		crashk_res.start = 0;
	}
#endif
	reserve_ibft_region();

	paging_init();

	check_ioapic();

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();
	e820_mark_nosave_regions();

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	/* On EFI, 0xa0000 may be ordinary RAM rather than VGA memory. */
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}