XCamReturn
FakePollThread::poll_buffer_loop ()
{
    // Lazily create the buffer pool the first time the loop runs.
    if (!_buf_pool.ptr () && init_buffer_pool () != XCAM_RETURN_NO_ERROR)
        return XCAM_RETURN_ERROR_MEM;

    // Pull one DRM BO buffer out of the pool for this iteration.
    SmartPtr<DrmBoBuffer> bo_buf =
        _buf_pool->get_buffer (_buf_pool).dynamic_cast_ptr<DrmBoBuffer> ();
    if (!bo_buf.ptr ()) {
        XCAM_LOG_WARNING ("FakePollThread get buffer failed");
        return XCAM_RETURN_ERROR_MEM;
    }

    // Fill the buffer from the fake source; a BYPASS result gets one retry.
    XCamReturn ret = read_buf (bo_buf);
    if (ret == XCAM_RETURN_BYPASS)
        ret = read_buf (bo_buf);

    // Hand the filled buffer to the registered callback, if any.
    SmartPtr<VideoBuffer> video_buf = bo_buf;
    if (ret == XCAM_RETURN_NO_ERROR && _poll_callback)
        return _poll_callback->poll_buffer_ready (video_buf);

    return ret;
}
/*
 * open_filesystem - mount an existing DFS file system from a device.
 *
 * devno: device to read the file system from.
 *
 * Reads and validates the super block, sets up the buffer cache, and
 * pins the group descriptor blocks in memory. Returns the new filsys.
 * Fatal problems (I/O failure, bad signature/version, out of memory)
 * abort via panic(), following this module's convention.
 */
struct filsys *open_filesystem(vfs_devno_t devno) {
  struct filsys *fs;
  struct groupdesc *gd;
  unsigned int i;

  // Allocate file system state.
  // Fix: the original used the malloc results unchecked; on allocation
  // failure that is a NULL dereference. Panic instead, like the other
  // fatal paths here.
  fs = (struct filsys *) malloc(sizeof(struct filsys));
  if (!fs) panic("out of memory for filsys");
  memset(fs, 0, sizeof(struct filsys));

  // Allocate and read super block (one sector at block 1).
  fs->super = (struct superblock *) malloc(SECTORSIZE);
  if (!fs->super) panic("out of memory for superblock");
  memset(fs->super, 0, SECTORSIZE);
  if (dev_read(devno, fs->super, SECTORSIZE, 1) != SECTORSIZE) panic("unable to read superblock");
  fs->super_dirty = 0;

  // Check signature and version.
  if (fs->super->signature != DFS_SIGNATURE) panic("invalid DFS signature");
  if (fs->super->version != DFS_VERSION) panic("invalid DFS version");

  // Set device number and derive block geometry from the super block.
  fs->devno = devno;
  fs->blocksize = 1 << fs->super->log_block_size;
  fs->inodes_per_block = fs->blocksize / sizeof(struct inodedesc);

  // Initialize buffer cache.
  fs->cache = init_buffer_pool(devno, CACHEBUFFERS, fs->blocksize);

  // Calculate the number of group descriptor blocks.
  fs->groupdescs_per_block = fs->blocksize / sizeof(struct groupdesc);
  fs->groupdesc_blocks = (fs->super->group_count * sizeof(struct groupdesc) + fs->blocksize - 1) / fs->blocksize;

  // Calculate the number of block pointers per block directory page.
  fs->log_blkptrs_per_block = fs->super->log_block_size - 2;

  // Read group descriptors and keep their buffers referenced for the
  // lifetime of the mount.
  fs->groupdesc_buffers = (struct buf **) malloc(sizeof(struct buf *) * fs->groupdesc_blocks);
  fs->groups = (struct group *) malloc(sizeof(struct group) * fs->super->group_count);
  if (!fs->groupdesc_buffers || !fs->groups) panic("out of memory for group descriptors");
  for (i = 0; i < fs->groupdesc_blocks; i++) {
    fs->groupdesc_buffers[i] = get_buffer(fs->cache, fs->super->groupdesc_table_block + i);
  }
  for (i = 0; i < fs->super->group_count; i++) {
    gd = (struct groupdesc *) fs->groupdesc_buffers[i / fs->groupdescs_per_block]->data;
    gd += (i % fs->groupdescs_per_block);
    fs->groups[i].desc = gd;
    // -1 presumably means "first free position not yet scanned" — note the
    // sibling create_filesystem initializes these to 0; verify intent.
    fs->groups[i].first_free_block = -1;
    fs->groups[i].first_free_inode = -1;
  }

  return fs;
}
/*
 * init_super_block - format the LMFS super block for a fresh image.
 *
 * addr:       precomputed addresses of the on-disk regions (super block,
 *             inode-index pool, data-block pool, inode table).
 * data_start: first block number of the data area.
 * free_block: number of free data blocks.
 * block_cnt:  total number of blocks in the image.
 *
 * Returns 0.
 *
 * Fix: the original declaration had no return type ("static
 * init_super_block(...)"); implicit int is invalid in C99 and C++,
 * and the function does return 0. Also dropped the unused local 'i'.
 */
static int init_super_block(struct lmfs_address *addr, u32 data_start,
                            u32 free_block, u32 block_cnt)
{
	struct lmfs_super_block *sb = addr->p_sb;
	char *disk = (char *)sb;	/* byte offsets below are relative to the super block */

	memcpy((char *)&sb->magic, "LMFS", 4);
	sb->n_blocks = block_cnt;
	sb->start_block = 0;
	sb->super_block_size = BLOCK_SIZE;

	/* Inode-index buffer pool; pool base is index 0. */
	sb->ib_pool_start_block = ((char *)addr->p_ib - disk) / BLOCK_SIZE;
	sb->ib_pool_current_block = baligin(MAX_FILE * sizeof(u32), BLOCK_SIZE) / BLOCK_SIZE - 1;
	sb->ib_pool_current_block += sb->ib_pool_start_block;
	sb->ib_pool_pos = MAX_FILE - 2;		/* 14 15 inodes */
	sb->ib_pool_block_nr = addr->ib_size / BLOCK_SIZE;
	/* Base is 1 since index 0 is reserved for the root inode. */
	init_buffer_pool(addr->p_ib, 1, MAX_FILE - 1);

	/* Data-block buffer pool. */
	sb->db_pool_start_block = ((char *)addr->p_db - disk) / BLOCK_SIZE;
	sb->db_pool_current_block = baligin(free_block * sizeof(u32), BLOCK_SIZE) / BLOCK_SIZE - 1;
	sb->db_pool_current_block += sb->db_pool_start_block;
	sb->db_pool_pos = free_block - 1;
	sb->db_pool_block_nr = addr->db_size / BLOCK_SIZE;
	init_buffer_pool(addr->p_db, data_start, free_block);

	/* Inode table region. */
	sb->inode_start_block = ((char *)addr->p_inode - disk) / BLOCK_SIZE;
	sb->inode_free = MAX_FILE;
	sb->inode_nr = MAX_FILE;
	sb->inode_n_blocks = addr->inode_size / BLOCK_SIZE;

	/* Data area bookkeeping. */
	sb->data_start_block = data_start;
	sb->data_free_blocks = free_block;
	sb->root_inode_index = 0;
	sb->ib_pool = 0;
	sb->db_pool = 0;

	dump_super_block(sb);

	return 0;
}
XCamReturn
V4l2Device::start ()
{
    XCamReturn ret = XCAM_RETURN_NO_ERROR;

    // Step 1: ask the driver to set up its buffers.
    ret = request_buffer ();
    XCAM_FAIL_RETURN (
        ERROR, ret == XCAM_RETURN_NO_ERROR, ret,
        "device(%s) start failed", XCAM_STR (_name));

    // Step 2: allocate the local buffer pool backing those buffers.
    ret = init_buffer_pool ();
    XCAM_FAIL_RETURN (
        ERROR, ret == XCAM_RETURN_NO_ERROR, ret,
        "device(%s) start failed", XCAM_STR (_name));

    // Step 3: queue every buffer before streaming begins.
    for (uint32_t idx = 0; idx < _buf_count; ++idx) {
        SmartPtr<V4l2Buffer> &pool_buf = _buf_pool [idx];
        XCAM_ASSERT (pool_buf.ptr());
        XCAM_ASSERT (pool_buf->get_buf().index == idx);
        ret = queue_buffer (pool_buf);
        if (ret != XCAM_RETURN_NO_ERROR) {
            XCAM_LOG_ERROR (
                "device(%s) start failed on queue index:%d",
                XCAM_STR (_name), idx);
            stop ();
            return ret;
        }
    }

    // Step 4: turn the stream on; undo everything on failure.
    if (io_control (VIDIOC_STREAMON, &_capture_buf_type) < 0) {
        XCAM_LOG_ERROR (
            "device(%s) start failed on VIDIOC_STREAMON",
            XCAM_STR (_name));
        stop ();
        return XCAM_RETURN_ERROR_IOCTL;
    }

    _active = true;
    XCAM_LOG_INFO ("device(%s) started successfully", XCAM_STR (_name));
    return XCAM_RETURN_NO_ERROR;
}
/*
 * create_filesystem - build a fresh DFS file system on a device.
 *
 * devno:       target device.
 * blocksize:   file system block size in bytes (power of two).
 * inode_ratio: bytes of group data per inode (controls inode count).
 * quick:       if nonzero, skip zeroing all data blocks and only zero
 *              the bitmaps and inode tables instead.
 *
 * Lays out super block, reserved blocks, group descriptor table,
 * per-group bitmaps and inode tables, reserves the fixed inodes and
 * creates the root directory. Panics on fatal sizing errors.
 * NOTE(review): malloc results are used unchecked here — same pattern
 * as open_filesystem; confirm whether panic-on-OOM is wanted.
 */
struct filsys *create_filesystem(vfs_devno_t devno, int blocksize, int inode_ratio, int quick) {
  struct filsys *fs;
  unsigned int blocks;
  unsigned int first_block;
  struct groupdesc *gd;
  struct buf *buf;
  unsigned int i, j;
  vfs_ino_t ino;
  struct inode *root;
  char *buffer;

  // Allocate file system
  fs = (struct filsys *) malloc(sizeof(struct filsys));
  memset(fs, 0, sizeof(struct filsys));

  // Allocate super block; marked dirty so it gets written out later.
  fs->super = (struct superblock *) malloc(SECTORSIZE);
  memset(fs->super, 0, SECTORSIZE);
  fs->super_dirty = 1;

  // Set device number and block size
  fs->devno = devno;
  fs->blocksize = blocksize;

  // Initialize buffer cache
  fs->cache = init_buffer_pool(devno, CACHEBUFFERS, fs->blocksize);

  // Set signature, version and block size in super block
  fs->super->signature = DFS_SIGNATURE;
  fs->super->version = DFS_VERSION;
  fs->super->log_block_size = bits(blocksize);

  // Each group has as many blocks as can be represented by the
  // (one-block) block bitmap: 8 bits per byte.
  fs->super->blocks_per_group = fs->blocksize * 8;

  // Get the device size in sectors from the device and convert it to blocks
  fs->super->block_count = dev_getsize(fs->devno) / (fs->blocksize / SECTORSIZE);

  // Set cache size (clamped for very small devices)
  fs->super->cache_buffers = CACHEBUFFERS;
  if (fs->super->cache_buffers > fs->super->block_count) fs->super->cache_buffers = 64;

  // The number of inodes in a group is computed as a ratio of the size
  // of the group; capped at one bitmap block's worth of inodes.
  fs->inodes_per_block = fs->blocksize / sizeof(struct inodedesc);
  if (fs->super->blocks_per_group < fs->super->block_count)
    fs->super->inodes_per_group = fs->blocksize * fs->super->blocks_per_group / inode_ratio;
  else
    fs->super->inodes_per_group = fs->blocksize * fs->super->block_count / inode_ratio;
  if (fs->super->inodes_per_group > fs->blocksize * 8) fs->super->inodes_per_group = fs->blocksize * 8;
  fs->inode_blocks_per_group = (fs->super->inodes_per_group * sizeof(struct inodedesc) + fs->blocksize - 1) / fs->blocksize;

  // Calculate the number of block pointers per block directory page
  fs->log_blkptrs_per_block = fs->super->log_block_size - 2;

  // Calculate the number of group descriptors and the number of blocks to store them
  fs->super->group_count = (fs->super->block_count + fs->super->blocks_per_group - 1) / fs->super->blocks_per_group;
  fs->groupdescs_per_block = fs->blocksize / sizeof(struct groupdesc);
  fs->groupdesc_blocks = (fs->super->group_count * sizeof(struct groupdesc) + fs->blocksize - 1) / fs->blocksize;

  // The reserved blocks are allocated right after the super block.
  // If a block holds more than one sector, block 1 contains the super
  // block itself, so reservations start one block later.
  fs->super->first_reserved_block = 1;
  if (fs->blocksize <= SECTORSIZE) fs->super->first_reserved_block++;
  fs->super->reserved_blocks = RESERVED_BLOCKS;

  // The group descriptor table starts after the superblock and reserved blocks
  fs->super->groupdesc_table_block = fs->super->first_reserved_block + fs->super->reserved_blocks;

  // If the last group is too small to hold the bitmaps and inode table, skip it
  blocks = fs->super->block_count % fs->super->blocks_per_group;
  if (blocks > 0 && blocks < fs->inode_blocks_per_group + 2) fs->super->group_count--;
  if (fs->super->group_count == 0) panic("filesystem too small");

  // Zero all data blocks on disk (skipped for a quick format)
  if (!quick) {
    buffer = (char *) malloc(fs->blocksize);
    memset(buffer, 0, fs->blocksize);
    for (i = fs->super->groupdesc_table_block + fs->groupdesc_blocks; i < fs->super->block_count; i++) {
      dev_write(fs->devno, buffer, fs->blocksize, i);
    }
    free(buffer);
  }

  // Allocate group descriptors; the descriptor buffers stay referenced
  // so gd pointers into their data remain valid below.
  fs->groupdesc_buffers = (struct buf **) malloc(sizeof(struct buf *) * fs->groupdesc_blocks);
  fs->groups = (struct group *) malloc(sizeof(struct group) * fs->super->group_count);
  for (i = 0; i < fs->groupdesc_blocks; i++) {
    fs->groupdesc_buffers[i] = alloc_buffer(fs->cache, fs->super->groupdesc_table_block + i);
  }
  for (i = 0; i < fs->super->group_count; i++) {
    gd = (struct groupdesc *) fs->groupdesc_buffers[i / fs->groupdescs_per_block]->data;
    gd += (i % fs->groupdescs_per_block);
    fs->groups[i].desc = gd;
    fs->groups[i].first_free_block = 0;
    fs->groups[i].first_free_inode = 0;
  }

  // Reserve inode for root directory
  fs->super->reserved_inodes = RESERVED_INODES;

  // Set inode count based on group count
  fs->super->inode_count = fs->super->inodes_per_group * fs->super->group_count;

  // All blocks and inodes initially free (metadata is subtracted below)
  fs->super->free_inode_count = fs->super->inode_count;
  fs->super->free_block_count = fs->super->block_count;

  // Initialize block bitmaps
  for (i = 0; i < fs->super->group_count; i++) {
    gd = fs->groups[i].desc;
    blocks = 0;
    first_block = fs->super->blocks_per_group * i;

    // The first group needs blocks for the super block and the group descriptors
    if (i == 0) blocks = fs->super->groupdesc_table_block + fs->groupdesc_blocks;

    // Next blocks in group are the block bitmap, inode bitmap and the inode table
    gd->block_bitmap_block = first_block + blocks++;
    gd->inode_bitmap_block = first_block + blocks++;
    gd->inode_table_block = first_block + blocks;
    blocks += fs->inode_blocks_per_group;

    // Update block bitmap: mark all metadata blocks as allocated
    buf = alloc_buffer(fs->cache, gd->block_bitmap_block);
    set_bits(buf->data, 0, blocks);
    mark_buffer_updated(buf);
    release_buffer(fs->cache, buf);

    // Determine the block count for the group. The last group may be truncated
    if (fs->super->blocks_per_group * (i + 1) > fs->super->block_count)
      gd->block_count = fs->super->block_count - fs->super->blocks_per_group * i;
    else
      gd->block_count = fs->super->blocks_per_group;

    // Set the count of free blocks and inodes for group
    gd->free_inode_count = fs->super->inodes_per_group;
    gd->free_block_count = gd->block_count - blocks;

    // Update super block
    fs->super->free_block_count -= blocks;
    mark_group_desc_dirty(fs, i);
  }

  // Zero out block and inode bitmaps and inode tables (only needed for a
  // quick format; a full format already zeroed the whole device)
  if (quick) {
    buffer = (char *) malloc(fs->blocksize);
    memset(buffer, 0, fs->blocksize);
    for (i = 0; i < fs->super->group_count; i++) {
      gd = fs->groups[i].desc;
      dev_write(fs->devno, buffer, fs->blocksize, gd->block_bitmap_block);
      dev_write(fs->devno, buffer, fs->blocksize, gd->inode_bitmap_block);
      for (j = 0; j < fs->inode_blocks_per_group; j++) {
        dev_write(fs->devno, buffer, fs->blocksize, gd->inode_table_block + j);
      }
    }
    free(buffer);
  }

  // Reserve inodes; they must come out in order 0..RESERVED_INODES-1
  for (i = 0; i < RESERVED_INODES; i++) {
    ino = new_inode(fs, 0, 0);
    if (ino != i) panic("unexpected inode");
  }

  // Create root directory
  root = get_inode(fs, DFS_INODE_ROOT);
  root->desc->mode = VFS_S_IFDIR | VFS_S_IRWXU | VFS_S_IRWXG | VFS_S_IRWXO;
  root->desc->ctime = root->desc->mtime = time(NULL);
  root->desc->linkcount = 1;
  mark_buffer_updated(root->buf);
  release_inode(root);

  return fs;
}
int cdfs_mount(struct fs *fs, char *opts) { struct cdfs *cdfs; dev_t devno; int cachebufs; int rc; int blk; struct buf *buf; struct iso_volume_descriptor *vd; // Check device devno = dev_open(fs->mntfrom); if (devno == NODEV) return -ENODEV; if (device(devno)->driver->type != DEV_TYPE_BLOCK) return -ENOTBLK; // Revalidate device and check block size if (get_option(opts, "revalidate", NULL, 0, NULL)) { rc = dev_ioctl(devno, IOCTL_REVALIDATE, NULL, 0); if (rc < 0) return rc; } if (dev_ioctl(devno, IOCTL_GETBLKSIZE, NULL, 0) != CDFS_BLOCKSIZE) return -ENXIO; // Allocate file system cdfs = (struct cdfs *) kmalloc(sizeof(struct cdfs)); memset(cdfs, 0, sizeof(struct cdfs)); cdfs->devno = devno; cdfs->blks = dev_ioctl(devno, IOCTL_GETDEVSIZE, NULL, 0); if (cdfs->blks < 0) return cdfs->blks; // Allocate cache cachebufs = get_num_option(opts, "cache", CDFS_DEFAULT_CACHESIZE); cdfs->cache = init_buffer_pool(devno, cachebufs, CDFS_BLOCKSIZE, NULL, cdfs); if (!cdfs->cache) return -ENOMEM; // Read volume descriptors cdfs->vdblk = 0; blk = 16; while (1) { int type; unsigned char *esc; buf = get_buffer(cdfs->cache, blk); if (!buf) return -EIO; vd = (struct iso_volume_descriptor *) buf->data; type = isonum_711(vd->type); esc = vd->escape_sequences; if (memcmp(vd->id, "CD001", 5) != 0) { free_buffer_pool(cdfs->cache); dev_close(cdfs->devno); kfree(cdfs); return -EIO; } if (cdfs->vdblk == 0 && type == ISO_VD_PRIMARY) { cdfs->vdblk = blk; } else if (type == ISO_VD_SUPPLEMENTAL && esc[0] == 0x25 && esc[1] == 0x2F && (esc[2] == 0x40 || esc[2] == 0x43 || esc[2] == 0x45)) { cdfs->vdblk = blk; cdfs->joliet = 1; } release_buffer(cdfs->cache, buf); if (type == ISO_VD_END) break; blk++; } if (cdfs->vdblk == 0) return -EIO; // Initialize filesystem from selected volume descriptor and read path table buf = get_buffer(cdfs->cache, cdfs->vdblk); if (!buf) return -EIO; vd = (struct iso_volume_descriptor *) buf->data; cdfs->volblks = isonum_733(vd->volume_space_size); rc = 
cdfs_read_path_table(cdfs, vd); if (rc < 0) return rc; release_buffer(cdfs->cache, buf); // Device mounted successfully fs->data = cdfs; return 0; }