/*
 * Map a device number onto the request queue of the corresponding
 * SCSI disk.  Returns NULL when no device is attached at that slot.
 */
static request_queue_t *sd_find_queue(kdev_t dev)
{
	Scsi_Disk *disk = &rscsi_disks[DEVICE_NR(dev)];

	if (disk->device == NULL)
		return NULL;		/* No such device */
	return &disk->device->request_queue;
}
/*
 * Media-change hook for a paride disk: report and clear the "media
 * changed" flag.  Non-removable units never report a change.
 */
static int pd_check_media(kdev_t dev)
{
	int changed;
	int unit = DEVICE_NR(dev);

	if (unit >= PD_UNITS || !PD.present)
		return -ENODEV;
	if (!PD.removable)
		return 0;

	pd_media_check(unit);
	changed = PD.changed;
	PD.changed = 0;		/* one-shot: reading the flag resets it */
	return changed;
}
/*
 * Tell the block layer where to queue a request.  Out-of-range minors
 * get a rate-limited complaint and no queue.
 */
request_queue_t *sbull_find_queue(kdev_t device)
{
	int minor = DEVICE_NR(device);

	if (minor >= sbull_devs) {
		static int complaints = 0;
		if (complaints++ < 5)	/* print the message at most five times */
			printk(KERN_WARNING "sbull: request for unknown device\n");
		return NULL;
	}
	return &sbull_devices[minor].queue;
}
/*
 * Close a ramdisk device.  Nothing to tear down here: the RDDESTROY
 * ioctl is responsible for freeing the ramdisk's memory.
 */
static int rd_release(struct inode *inode, struct file *filp)
{
	int minor = DEVICE_NR(inode->i_rdev);

	debug1("RD: release ram%d\n", minor);
	return 0;
}
/* xd_open: open an XT hard-disk device.  Sleeps until the drive's state
 * is valid, then bumps the per-drive access count. */
static int xd_open (struct inode *inode,struct file *file)
{
	int drive = DEVICE_NR(inode->i_rdev);

	if (drive >= xd_drives)
		return -ENXIO;

	/* Block until the drive's data is marked valid. */
	while (!xd_valid[drive])
		sleep_on(&xd_wait_open);

	xd_access[drive]++;
	return (0);
}
/*
 * pd_ioctl: ioctl dispatcher for a paride disk.
 *
 * Handles eject, BIOS geometry queries, partition re-reads and the
 * generic block-layer ioctls; anything else is -EINVAL.
 */
static int pd_ioctl(struct inode *inode,struct file *file,
		    unsigned int cmd, unsigned long arg)
{
	struct hd_geometry *geo = (struct hd_geometry *) arg;
	int dev, err, unit;

	if ((!inode) || (!inode->i_rdev)) return -EINVAL;
	dev = MINOR(inode->i_rdev);		/* minor selects the partition */
	unit = DEVICE_NR(inode->i_rdev);	/* unit selects the drive */
	if (dev >= PD_DEVS) return -EINVAL;
	if (!PD.present) return -ENODEV;

	switch (cmd) {
	    case CDROMEJECT:
		/* Only eject when we are the sole opener. */
		if (PD.access == 1) pd_eject(unit);
		return 0;
	    case HDIO_GETGEO:
		if (!geo) return -EINVAL;
		err = verify_area(VERIFY_WRITE,geo,sizeof(*geo));
		if (err) return err;
		if (PD.alt_geom) {
			/* Report the synthetic logical geometry rather than
			   the drive's native one. */
			put_user(PD.capacity/(PD_LOG_HEADS*PD_LOG_SECTS),
				 (short *) &geo->cylinders);
			put_user(PD_LOG_HEADS, (char *) &geo->heads);
			put_user(PD_LOG_SECTS, (char *) &geo->sectors);
		} else {
			put_user(PD.cylinders, (short *) &geo->cylinders);
			put_user(PD.heads, (char *) &geo->heads);
			put_user(PD.sectors, (char *) &geo->sectors);
		}
		/* start sector of this partition, relative to the drive */
		put_user(pd_hd[dev].start_sect,(long *)&geo->start);
		return 0;
	    case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return pd_revalidate(inode->i_rdev);
	    case BLKGETSIZE:
	    case BLKGETSIZE64:
	    case BLKROSET:
	    case BLKROGET:
	    case BLKRASET:
	    case BLKRAGET:
	    case BLKFLSBUF:
	    case BLKPG:
		/* Generic block-device ioctls are handled centrally. */
		return blk_ioctl(inode->i_rdev, cmd, arg);
	    default:
		return -EINVAL;
	}
}
static void sd_release(struct inode * inode, struct file * file) { int target; sync_dev(inode->i_rdev); target = DEVICE_NR(MINOR(inode->i_rdev)); rscsi_disks[target].device->access_count--; if(rscsi_disks[target].device->removable) { if(!rscsi_disks[target].device->access_count) sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0); }; }
static int rd_open(struct inode *inode, struct file *filp) { int target = DEVICE_NR(inode->i_rdev); debug1("RD: open ram%d\n", target); if((rd_initialised == 0) || (target >= MAX_ENTRIES)) return -ENXIO; #if 0 if (rd_info[target].flags && RD_BUSY) return -EBUSY; #endif inode->i_size = (rd_info[target].size) << 9; return 0; }
/*
 * Open a paride disk.  Waits until probing has settled, bumps the open
 * count, and for removable media rechecks the media and locks the door.
 */
static int pd_open (struct inode *inode, struct file *file)
{
	int unit = DEVICE_NR(inode->i_rdev);

	if (unit >= PD_UNITS || !PD.present)
		return -ENODEV;

	/* Sleep until the device state is valid. */
	wait_event (pd_wait_open, pd_valid);

	PD.access++;
	if (PD.removable) {
		pd_media_check(unit);
		pd_doorlock(unit,IDE_DOORLOCK);	/* pin the media while open */
	}
	return 0;
}
/* * Find the device for this request. */ static Sbull_Dev *sbull_locate_device(const struct request *req) { int devno; Sbull_Dev *device; /* Check if the minor number is in range */ devno = DEVICE_NR(req->rq_dev); if (devno >= sbull_devs) { static int count = 0; if (count++ < 5) /* print the message at most five times */ printk(KERN_WARNING "sbull: request for unknown device\n"); return NULL; } device = sbull_devices + devno; return device; }
/*
 * Close a paride disk.  Drops the open count; the last closer of a
 * removable unit unlocks the door that pd_open() locked.
 */
static int pd_release (struct inode *inode, struct file *file)
{
	int unit = DEVICE_NR(inode->i_rdev);

	if (unit >= PD_UNITS || PD.access <= 0)
		return -EINVAL;

	PD.access--;
	if (!PD.access && PD.removable)
		pd_doorlock(unit,IDE_DOORUNLOCK);
	return 0;
}
/*
 * Open a SCSI disk: validate the target, wait out any revalidation in
 * progress, handle media change on removables, and lock the door while
 * the device is held open.  Returns 0 or -ENODEV.
 */
static int sd_open(struct inode * inode, struct file * filp)
{
	int target;
	target = DEVICE_NR(MINOR(inode->i_rdev));

	if (target >= NR_SD || !rscsi_disks[target].device)
		return -ENODEV;	/* No such device */

	/* Make sure that only one process can do a check_change_disk at one
	   time.  This is also used to lock out further access when the
	   partition table is being re-read. */
	/* NOTE(review): busy-wait with no schedule() — relies on the busy
	   flag being cleared from another context; confirm intent. */
	while (rscsi_disks[target].device->busy);

	if (rscsi_disks[target].device->removable) {
		check_disk_change(inode->i_rdev);
		/* First opener locks the door so media cannot be ejected. */
		if (!rscsi_disks[target].device->access_count)
			sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
	};
	rscsi_disks[target].device->access_count++;
	return 0;
}
/*
 * do_pd_request: request function for the paride disk queue.
 *
 * Pulls the head request apart into the pd_* globals and hands the
 * actual transfer to the paride layer via pi_do_claimed().  pd_busy
 * serialises: while a transfer is in flight we do nothing here.
 */
static void do_pd_request (request_queue_t * q)
{
	struct buffer_head * bh;
	int unit;

	if (pd_busy) return;	/* a transfer is already in progress */
repeat:
	if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;
	INIT_REQUEST;

	pd_dev = MINOR(CURRENT->rq_dev);
	pd_unit = unit = DEVICE_NR(CURRENT->rq_dev);
	pd_block = CURRENT->sector;
	pd_run = CURRENT->nr_sectors;
	pd_count = CURRENT->current_nr_sectors;
	bh = CURRENT->bh;

	/* Reject out-of-range minors and transfers past the partition end. */
	if ((pd_dev >= PD_DEVS) ||
	    ((pd_block+pd_count) > pd_hd[pd_dev].nr_sects)) {
		end_request(0);
		goto repeat;
	}

	pd_cmd = CURRENT->cmd;
	pd_poffs = pd_hd[pd_dev].start_sect;	/* partition offset */
	pd_block += pd_poffs;			/* make the block absolute */
	pd_buf = CURRENT->buffer;
	pd_retries = 0;
	pd_busy = 1;
	if (pd_cmd == READ) pi_do_claimed(PI,do_pd_read);
	else if (pd_cmd == WRITE) pi_do_claimed(PI,do_pd_write);
	else {	/* neither READ nor WRITE: fail the request */
		pd_busy = 0;
		end_request(0);
		goto repeat;
	}
}
static int z2_open( struct inode *inode, struct file *filp ) { int device; int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) * sizeof( z2ram_map[0] ); int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) * sizeof( z2ram_map[0] ); int rc = -ENOMEM; device = DEVICE_NR( inode->i_rdev ); if ( current_device != -1 && current_device != device ) { rc = -EBUSY; goto err_out; } if ( current_device == -1 ) { z2_count = 0; chip_count = 0; list_count = 0; z2ram_size = 0; /* Use a specific list entry. */ if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) { int index = device - Z2MINOR_MEMLIST1 + 1; unsigned long size, paddr, vaddr; if (index >= m68k_realnum_memory) { printk( KERN_ERR DEVICE_NAME ": no such entry in z2ram_map\n" ); goto err_out; } paddr = m68k_memory[index].addr; size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1); #ifdef __powerpc__ /* FIXME: ioremap doesn't build correct memory tables. */ { vfree(vmalloc (size)); } vaddr = (unsigned long) __ioremap (paddr, size, _PAGE_WRITETHRU); #else vaddr = (unsigned long)ioremap(paddr, size); #endif z2ram_map = kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]), GFP_KERNEL); if ( z2ram_map == NULL ) { printk( KERN_ERR DEVICE_NAME ": cannot get mem for z2ram_map\n" ); goto err_out; } while (size) { z2ram_map[ z2ram_size++ ] = vaddr; size -= Z2RAM_CHUNKSIZE; vaddr += Z2RAM_CHUNKSIZE; list_count++; } if ( z2ram_size != 0 ) printk( KERN_INFO DEVICE_NAME ": using %iK List Entry %d Memory\n", list_count * Z2RAM_CHUNK1024, index ); } else switch ( device ) { case Z2MINOR_COMBINED: z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL ); if ( z2ram_map == NULL ) { printk( KERN_ERR DEVICE_NAME ": cannot get mem for z2ram_map\n" ); goto err_out; } get_z2ram(); get_chipram(); if ( z2ram_size != 0 ) printk( KERN_INFO DEVICE_NAME ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n", z2_count * Z2RAM_CHUNK1024, chip_count * Z2RAM_CHUNK1024, ( z2_count + chip_count ) * Z2RAM_CHUNK1024 ); break; case 
Z2MINOR_Z2ONLY: z2ram_map = kmalloc( max_z2_map, GFP_KERNEL ); if ( z2ram_map == NULL ) { printk( KERN_ERR DEVICE_NAME ": cannot get mem for z2ram_map\n" ); goto err_out; } get_z2ram(); if ( z2ram_size != 0 ) printk( KERN_INFO DEVICE_NAME ": using %iK of Zorro II RAM\n", z2_count * Z2RAM_CHUNK1024 ); break; case Z2MINOR_CHIPONLY: z2ram_map = kmalloc( max_chip_map, GFP_KERNEL ); if ( z2ram_map == NULL ) { printk( KERN_ERR DEVICE_NAME ": cannot get mem for z2ram_map\n" ); goto err_out; } get_chipram(); if ( z2ram_size != 0 ) printk( KERN_INFO DEVICE_NAME ": using %iK Chip RAM\n", chip_count * Z2RAM_CHUNK1024 ); break; default: rc = -ENODEV; goto err_out; break; } if ( z2ram_size == 0 ) { printk( KERN_NOTICE DEVICE_NAME ": no unused ZII/Chip RAM found\n" ); goto err_out_kfree; } current_device = device; z2ram_size <<= Z2RAM_CHUNKSHIFT; z2_sizes[ device ] = z2ram_size >> 10; blk_size[ MAJOR_NR ] = z2_sizes; }
/*
 * sd_ioctl: ioctl entry point for SCSI disks.
 *
 * Geometry queries (classic and "big"), the standard block ioctls and
 * partition re-reads are handled here; everything else is forwarded to
 * the generic scsi_ioctl() handler.
 */
static int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
{
	kdev_t dev = inode->i_rdev;
	struct Scsi_Host * host;
	Scsi_Device * SDev;
	int diskinfo[4];

	SDev = rscsi_disks[DEVICE_NR(dev)].device;
	if (!SDev)
		return -ENODEV;

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	if( !scsi_block_when_processing_errors(SDev) )
	{
		return -ENODEV;
	}

	switch (cmd)
	{
		case HDIO_GETGEO:   /* Return BIOS disk parameters */
		{
			struct hd_geometry *loc = (struct hd_geometry *) arg;
			if(!loc)
				return -EINVAL;

			host = rscsi_disks[DEVICE_NR(dev)].device->host;

			/* default to most commonly used values */
			diskinfo[0] = 0x40;
			diskinfo[1] = 0x20;
			diskinfo[2] = rscsi_disks[DEVICE_NR(dev)].capacity >> 11;

			/* override with calculated, extended default, or driver values */
			if(host->hostt->bios_param != NULL)
				host->hostt->bios_param(&rscsi_disks[DEVICE_NR(dev)],
					    dev,
					    &diskinfo[0]);
			else scsicam_bios_param(&rscsi_disks[DEVICE_NR(dev)],
					dev, &diskinfo[0]);

			/* Copy the values out one by one; any fault aborts. */
			if (put_user(diskinfo[0], &loc->heads) ||
				put_user(diskinfo[1], &loc->sectors) ||
				put_user(diskinfo[2], &loc->cylinders) ||
				put_user(sd_gendisks[SD_MAJOR_IDX(
				    inode->i_rdev)].part[MINOR(
				    inode->i_rdev)].start_sect, &loc->start))
				return -EFAULT;
			return 0;
		}
		case HDIO_GETGEO_BIG:
		{
			/* Same as HDIO_GETGEO but with a wider cylinder field. */
			struct hd_big_geometry *loc = (struct hd_big_geometry *) arg;

			if(!loc)
				return -EINVAL;

			host = rscsi_disks[DEVICE_NR(dev)].device->host;

			/* default to most commonly used values */
			diskinfo[0] = 0x40;
			diskinfo[1] = 0x20;
			diskinfo[2] = rscsi_disks[DEVICE_NR(dev)].capacity >> 11;

			/* override with calculated, extended default, or driver values */
			if(host->hostt->bios_param != NULL)
				host->hostt->bios_param(&rscsi_disks[DEVICE_NR(dev)],
					    dev,
					    &diskinfo[0]);
			else scsicam_bios_param(&rscsi_disks[DEVICE_NR(dev)],
					dev, &diskinfo[0]);

			if (put_user(diskinfo[0], &loc->heads) ||
				put_user(diskinfo[1], &loc->sectors) ||
				put_user(diskinfo[2], (unsigned int *) &loc->cylinders) ||
				put_user(sd_gendisks[SD_MAJOR_IDX(
				    inode->i_rdev)].part[MINOR(
				    inode->i_rdev)].start_sect, &loc->start))
				return -EFAULT;
			return 0;
		}
		case BLKGETSIZE:
		case BLKGETSIZE64:
		case BLKROSET:
		case BLKROGET:
		case BLKRASET:
		case BLKRAGET:
		case BLKFLSBUF:
		case BLKSSZGET:
		case BLKPG:
		case BLKELVGET:
		case BLKELVSET:
		case BLKBSZGET:
		case BLKBSZSET:
			/* Generic block-layer ioctls. */
			return blk_ioctl(inode->i_rdev, cmd, arg);

		case BLKRRPART: /* Re-read partition tables */
			if (!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return revalidate_scsidisk(dev, 1);

		default:
			/* Anything unrecognised goes to the generic SCSI layer. */
			return scsi_ioctl(rscsi_disks[DEVICE_NR(dev)].device , cmd, (void *) arg);
	}
}
static int sd_init_command(Scsi_Cmnd * SCpnt) { int dev, block, this_count; struct hd_struct *ppnt; Scsi_Disk *dpnt; #if CONFIG_SCSI_LOGGING char nbuff[6]; #endif ppnt = &sd_gendisks[SD_MAJOR_IDX(SCpnt->request.rq_dev)].part[MINOR(SCpnt->request.rq_dev)]; dev = DEVICE_NR(SCpnt->request.rq_dev); block = SCpnt->request.sector; this_count = SCpnt->request_bufflen >> 9; SCSI_LOG_HLQUEUE(1, printk("Doing sd request, dev = 0x%x, block = %d\n", SCpnt->request.rq_dev, block)); dpnt = &rscsi_disks[dev]; if (dev >= sd_template.dev_max || !dpnt->device || !dpnt->device->online || block + SCpnt->request.nr_sectors > ppnt->nr_sects) { SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors)); SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); return 0; } block += ppnt->start_sect; if (dpnt->device->changed) { /* * quietly refuse to do anything to a changed disc until the changed * bit has been reset */ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ return 0; } SCSI_LOG_HLQUEUE(2, sd_devname(dev, nbuff)); SCSI_LOG_HLQUEUE(2, printk("%s : real dev = /dev/%d, block = %d\n", nbuff, dev, block)); /* * If we have a 1K hardware sectorsize, prevent access to single * 512 byte sectors. In theory we could handle this - in fact * the scsi cdrom driver must be able to handle this because * we typically use 1K blocksizes, and cdroms typically have * 2K hardware sectorsizes. Of course, things are simpler * with the cdrom, since it is read-only. For performance * reasons, the filesystems should be able to handle this * and not force the scsi disk driver to use bounce buffers * for this. 
*/ if (dpnt->device->sector_size == 1024) { if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { printk("sd.c:Bad block number requested"); return 0; } else { block = block >> 1; this_count = this_count >> 1; } } if (dpnt->device->sector_size == 2048) { if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { printk("sd.c:Bad block number requested"); return 0; } else { block = block >> 2; this_count = this_count >> 2; } } if (dpnt->device->sector_size == 4096) { if ((block & 7) || (SCpnt->request.nr_sectors & 7)) { printk("sd.c:Bad block number requested"); return 0; } else { block = block >> 3; this_count = this_count >> 3; } } switch (SCpnt->request.cmd) { case WRITE: if (!dpnt->device->writeable) { return 0; } SCpnt->cmnd[0] = WRITE_6; SCpnt->sc_data_direction = SCSI_DATA_WRITE; break; case READ: SCpnt->cmnd[0] = READ_6; SCpnt->sc_data_direction = SCSI_DATA_READ; break; default: panic("Unknown sd command %d\n", SCpnt->request.cmd); } SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", nbuff, (SCpnt->request.cmd == WRITE) ? "writing" : "reading", this_count, SCpnt->request.nr_sectors)); SCpnt->cmnd[1] = (SCpnt->device->scsi_level <= SCSI_2) ? ((SCpnt->lun << 5) & 0xe0) : 0; if (((this_count > 0xff) || (block > 0x1fffff)) || SCpnt->device->ten) { if (this_count > 0xffff) this_count = 0xffff; SCpnt->cmnd[0] += READ_10 - READ_6; SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; SCpnt->cmnd[5] = (unsigned char) block & 0xff; SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; } else { if (this_count > 0xff)
/*
 * xd_ioctl: ioctl handler for the XT hard-disk driver.
 *
 * Supports geometry queries, DMA enable/disable, partition re-reads and
 * the generic block-layer ioctls.
 */
static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg)
{
	int dev;

	if ((!inode) || !(inode->i_rdev))
		return -EINVAL;
	dev = DEVICE_NR(inode->i_rdev);
	if (dev >= xd_drives)
		return -EINVAL;

	switch (cmd) {
		case HDIO_GETGEO:
		{
			struct hd_geometry g;
			struct hd_geometry *geometry = (struct hd_geometry *) arg;
			/* Fill a kernel-side copy, then push it to user
			   space in one go. */
			g.heads = xd_info[dev].heads;
			g.sectors = xd_info[dev].sectors;
			g.cylinders = xd_info[dev].cylinders;
			g.start = xd_struct[MINOR(inode->i_rdev)].start_sect;
			return copy_to_user(geometry, &g, sizeof g) ? -EFAULT : 0;
		}
		case HDIO_SET_DMA:
			if (!capable(CAP_SYS_ADMIN)) return -EACCES;
			if (xdc_busy) return -EBUSY;	/* controller in use */
			nodma = !arg;	/* arg != 0 enables DMA */
			if (nodma && xd_dma_buffer) {
				/* DMA switched off: release the bounce buffer. */
				xd_dma_mem_free((unsigned long)xd_dma_buffer,
						xd_maxsectors * 0x200);
				xd_dma_buffer = 0;
			} else if (!nodma && !xd_dma_buffer) {
				/* DMA switched on: allocate the bounce buffer. */
				xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
				if (!xd_dma_buffer) {
					nodma = XD_DONT_USE_DMA;
					return -ENOMEM;
				}
			}
			return 0;
		case HDIO_GET_DMA:
			return put_user(!nodma, (long *) arg);
		case HDIO_GET_MULTCOUNT:
			return put_user(xd_maxsectors, (long *) arg);
		case BLKRRPART:
			if (!capable(CAP_SYS_ADMIN))
				return -EACCES;
			return xd_reread_partitions(inode->i_rdev);
		case BLKGETSIZE:
		case BLKGETSIZE64:
		case BLKFLSBUF:
		case BLKROSET:
		case BLKROGET:
		case BLKRASET:
		case BLKRAGET:
		case BLKPG:
			/* Generic block-device ioctls. */
			return blk_ioctl(inode->i_rdev, cmd, arg);
		default:
			return -EINVAL;
	}
}
/*
 * do_flash_request: request loop for the flash block device.
 *
 * Pops requests off the queue forever (INIT_REQUEST returns from the
 * function when the queue is empty).  Each request is mapped onto the
 * memory-mapped flash partition and serviced with memcpy (READ) or
 * flash_write (WRITE), taking a per-chip busy lock since a partition
 * may span several chips.
 */
static void do_flash_request()
{
	while(1) {
		int minor, fsize, opsize;
		struct flashchip *chip;
		struct flashpartition *part;
		unsigned char *fptr;
		unsigned long flags;

		INIT_REQUEST;	/* returns when the queue is empty */

		minor = DEVICE_NR(CURRENT_DEV);
		minor -= FLASH_MINOR;

		/* for now, just handle requests to the flash minors */
		if(minor < 0 || minor >= MAX_PARTITIONS ||
		   !partitions[minor].start) {
			printk(KERN_WARNING "flash: bad minor %d.", minor);
			end_request(0);
			continue;
		}

		part = partitions + minor;

		/* get the actual memory address of the sectors requested */
		fptr = part->start + CURRENT->sector * FLASH_SECTSIZE;
		fsize = CURRENT->current_nr_sectors * FLASH_SECTSIZE;

		/* check so it's not totally out of bounds */
		if(fptr + fsize > part->start + part->size) {
			printk(KERN_WARNING "flash: request past end "
			       "of partition\n");
			end_request(0);
			continue;
		}

		/* actually do something, but get a lock on the chip first.
		 * since the partition might span several chips, we need to
		 * loop and lock each chip in turn */
		while(fsize > 0) {
			chip = getchip(fptr);

			/* how much fits in this chip ? */
			opsize = (fptr + fsize) > (chip->start + chip->size) ?
				(chip->start + chip->size - fptr) : fsize;

			/* lock the chip: interrupts off while testing and
			 * setting the busy flag */
			save_flags(flags);
			cli();
			while(chip->busy)
				sleep_on(&chip->wqueue);
			chip->busy = 1;
			restore_flags(flags);

			switch(CURRENT->cmd) {
			case READ:
				memcpy(CURRENT->buffer, fptr, opsize);
				FDEBUG(printk("flash read from %p to %p "
					      "size %d\n", fptr,
					      CURRENT->buffer, opsize));
				break;
			case WRITE:
				FDEBUG(printk("flash write block at 0x%p\n",
					      fptr));
				flash_write(fptr,
					    (unsigned char *) CURRENT->buffer,
					    opsize);
				break;
			default:
				/* Shouldn't happen. */
				/* NOTE(review): this `continue` re-enters the
				 * inner while without advancing fptr/fsize, so
				 * an unknown cmd would keep ending requests in
				 * a loop — confirm whether it should instead
				 * break out to the outer loop. */
				chip->busy = 0;
				wake_up(&chip->wqueue);
				end_request(0);
				continue;
			}

			/* release the lock */
			chip->busy = 0;
			wake_up(&chip->wqueue);

			/* see if there is anything left to write in the next chip */
			fsize -= opsize;
			fptr += opsize;
		}

		/* We have a liftoff! */
		end_request(1);
	}
}
/*
 * rd_ioctl: create (RDCREATE) or destroy (RDDESTROY) a ramdisk.
 *
 * RDCREATE allocates the requested size (arg, in KB) as a linked chain
 * of memory segments, clearing each one; on any allocation failure all
 * segments already obtained are released again.  RDDESTROY invalidates
 * cached data and frees the chain.  Superuser only.
 */
static int rd_ioctl(register struct inode *inode, struct file *file,
		    unsigned int cmd, unsigned int arg)
{
	unsigned long size;
	unsigned int i;
	int j, k, target = DEVICE_NR(inode->i_rdev);

	if (!suser())
		return -EPERM;
	debug2("RD: ioctl %d %s\n", target, (cmd ? "kill" : "make"));

	switch (cmd) {
	case RDCREATE:
		/* NOTE(review): `flags && RD_BUSY` looks like it was meant to
		   be the bitwise test `flags & RD_BUSY`; as written any
		   nonzero flags value counts as busy — confirm. */
		if (rd_info[target].flags && RD_BUSY) {
			return -EBUSY;
		} else {
			/* allocate memory */
#if 0
			rd_info[target].size = 0;
#endif
			k = -1;	/* index of the previous segment in the chain */
			for (i = 0; i <= (arg - 1) / ((SEG_SIZE / 1024) * P_SIZE); i++) {
				j = find_free_seg();	/* find free place in queue */
				debug1("RD: ioctl find_free_seg() = %d\n", j);
				if (j == -1) {
					rd_dealloc(target);	/* undo partial allocation */
					return -ENOMEM;
				}
				if (i == 0)
					rd_info[target].index = j;	/* head of the chain */
				if (i == (arg / ((SEG_SIZE / 1024) * P_SIZE)))
					/* size in 16 byte pagez = (arg % 64) * 64 */
					size = (arg % ((SEG_SIZE / 1024) * P_SIZE)) * ((SEG_SIZE / 1024) * P_SIZE);
				else
					size = SEG_SIZE;	/* full segment */
				rd_segment[j].segment = mm_alloc(size);
				if (rd_segment[j].segment == -1) {
					rd_dealloc(target);
					return -ENOMEM;
				}
				debug4("RD: ioctl pass: %d, allocated %d pages, rd_segment[%d].segment = 0x%x\n",
				       i, (int) size, j, rd_segment[j].segment);
				/* recalculate int size to reflect size in sectors, not pages */
				size = size / DIVISOR;
				rd_segment[j].seg_size = size;	/* size in sectors */
				debug2("RD: ioctl rd_segment[%d].seg_size = %d sectors\n",
				       j, rd_segment[j].seg_size);
				rd_info[target].size += rd_segment[j].seg_size;	/* size in 512 B blocks */
				size = (long) rd_segment[j].seg_size * SECTOR_SIZE;
				debug1("RD: ioctl size = %ld\n", size);
				/* this terrible hack makes sure fmemset clears whole
				   segment even if size == 64 KB :) */
				if (size != ((long) SEG_SIZE * (long) P_SIZE)) {
					debug2("RD: ioctl calling fmemset(0, 0x%x, 0, %d) ..\n",
					       rd_segment[j].segment, (int) size);
					fmemset(0, rd_segment[j].segment, 0, (int) size);	/* clear seg_size * SECTOR_SIZE bytes */
				} else {
					/* clear the 64 KB segment in two halves */
					debug2("RD: ioctl calling fmemset(0, 0x%x, 0, %d) ..\n",
					       rd_segment[j].segment, (int) (size / 2));
					fmemset(0, rd_segment[j].segment, 0, (int) (size / 2));	/* we could hardcode 32768 instead of size / 2 here */
					debug3("rd_ioctl(): calling fmemset(%d, 0x%x, 0, %d) ..\n",
					       (int) (size / 2), rd_segment[j].segment, (int) (size / 2));
					fmemset((size / 2), rd_segment[j].segment, 0, (int) (size / 2));
				}
				if (k != -1)
					rd_segment[k].next = j;	/* set link to next index */
				k = j;
			}
			rd_info[target].flags = RD_BUSY;	/* mark the ramdisk live */
			debug3("RD: ioctl ramdisk %d created, size = %d blocks, index = %d\n",
			       target, rd_info[target].size, rd_info[target].index);
		}
		debug("RD: ioctl about to return 0\n");
		return 0;
	case RDDESTROY:
		/* NOTE(review): same suspicious `&&` as above — confirm. */
		if (rd_info[target].flags && RD_BUSY) {
			/* Drop any cached inodes/buffers before freeing memory. */
			invalidate_inodes(inode->i_rdev);
			invalidate_buffers(inode->i_rdev);
			rd_dealloc(target);
			rd_info[target].flags = 0;
			return 0;
		} else
			return -EINVAL;
	}
	return -EINVAL;
}
//*--------------------------------------------------------------------------*
//* CANMSG_Init: (re)initialise the CAN hardware for the given channel.      *
//*                                                                          *
//* Builds a checklist of what actually has to be re-initialised by         *
//* comparing the new channel's settings against the previously selected    *
//* channel, then (re)programs transceiver, RX and TX message objects as    *
//* needed and registers the channel with the comm module.                  *
//* Returns CANMSG_NO_ERROR.                                                *
//*--------------------------------------------------------------------------*
UINT32 CANMSG_Init (UINT8 Channel)
{
//*--------------------------------------------------------------------------*
//* Local variables                                                          *
//*--------------------------------------------------------------------------*
   UINT32 Error, i, InitChecklist;
   TCAN_Obj NewCANObj;

//*--------------------------------------------------------------------------*
//* Start of function                                                        *
//*--------------------------------------------------------------------------*
   // Just to be safe: normalise the channel number
   Channel = DEVICE_NR(Channel);

   // Build the init checklist
   if (CAN_Selected_Channel == NONE)
      // cold start after power-on: initialise everything
      InitChecklist = INIT_EXT_HW + INIT_TRANSCEIVER + INIT_RX_OBJECT + INIT_TX_OBJECT;
   else
   {
      // just to be safe
      CAN_Selected_Channel = DEVICE_NR(CAN_Selected_Channel);
      InitChecklist = 0x0000;
      // Only re-initialise the parts whose settings differ from the
      // previously active channel.
      if (Gateway_CAN[Channel].RXidentifier != Gateway_CAN[CAN_Selected_Channel].RXidentifier)
         InitChecklist |= INIT_RX_OBJECT;
      if (Gateway_CAN[Channel].TXidentifier != Gateway_CAN[CAN_Selected_Channel].TXidentifier)
         InitChecklist |= INIT_TX_OBJECT;
      if (Gateway_CAN[Channel].Baudrate != Gateway_CAN[CAN_Selected_Channel].Baudrate)
         InitChecklist |= INIT_TRANSCEIVER + INIT_RX_OBJECT + INIT_TX_OBJECT;
      if (Gateway_CAN[Channel].Transceiver != Gateway_CAN[CAN_Selected_Channel].Transceiver)
         InitChecklist = INIT_EXT_HW + INIT_TRANSCEIVER + INIT_RX_OBJECT + INIT_TX_OBJECT;;   // stray second ';' is a harmless empty statement
   }

   // Remember the channel number for next time
   CAN_Selected_Channel = Channel;

//*--------------------------------------------------------------------------*
   // Allow time for a possible bus-off recovery
   timWait (10);

   Error = CANMSG_NO_ERROR;

   // Disable the CAN interrupt (resp. its software enable switch) so the
   // initialisation below cannot be disturbed
   CANMSG_INT_Enable = 0;

   // Initialise the state machines
   CANMSG_RX_State = CANMSG_RX_DISABLED;
   CANMSG_TX_State = CANMSG_TX_DISABLED;

//*--------------------------------------------------------------------------*
//* Initialise the CAN hardware                                              *
//*--------------------------------------------------------------------------*
   if (InitChecklist & INIT_EXT_HW)
   {
      // Switch on the CAN transceiver; device-dependent
      CAN_TRANSCEIVER_ENABLE;
      // Wake the CAN transceiver as a precaution, otherwise the first 3
      // edges on RX are lost; device-dependent
      CAN_TRANSCEIVER_WAKEUP;
   }

   if (InitChecklist & INIT_TRANSCEIVER)
   {
#if CP_CAN3
      // Basic controller initialisation
      CAN3_Init(Gateway_CAN[Channel].Baudrate);
#else
#error Can init missing in canmsg.c
#endif
   }

//*--------------------------------------------------------------------------*
//* Set up the RX object                                                     *
//*--------------------------------------------------------------------------*
   if (InitChecklist & INIT_RX_OBJECT)
   {
      // Preset the 8 data bytes; not technically necessary
      for(i=0; i < 8; i++)
         NewCANObj.ubData[i] = 0;
      NewCANObj.ubMsgObjNr = CAN_RX_OBJECT;
      // 8-bit message configuration register: 8 data bytes, standard
      // identifier, receive
      NewCANObj.ubMsgCfg = 0x80;
      // standard (11-bit)/extended (29-bit) identifier
      NewCANObj.ulArbitr = Gateway_CAN[Channel].RXidentifier;
      // all bits are significant
      NewCANObj.ulMaskr = 0x000007FF;
      // Configure the RX message object
      CAN_bRequestMsgObj(CAN_RX_OBJECT);
      CAN_vConfigMsgObj(CAN_RX_OBJECT, &NewCANObj);
   }

//*--------------------------------------------------------------------------*
//* Set up the TX object                                                     *
//*--------------------------------------------------------------------------*
   if (InitChecklist & INIT_TX_OBJECT)
   {
      NewCANObj.ubMsgObjNr = CAN_TX_OBJECT;
      // 8-bit message configuration register: 8 data bytes, standard
      // identifier, transmit
      NewCANObj.ubMsgCfg = 0x88;
      // standard (11-bit)/extended (29-bit) identifier
      NewCANObj.ulArbitr = Gateway_CAN[Channel].TXidentifier;
      // remaining settings as for the RX object
      // Configure the TX message object
      CAN_bRequestMsgObj(CAN_TX_OBJECT);
      CAN_vConfigMsgObj(CAN_TX_OBJECT, &NewCANObj);
   }

//*--------------------------------------------------------------------------*
//* Complete the initialisation                                              *
//*--------------------------------------------------------------------------*
   // Reset the error store used by the interrupt routine
   CANMSG_INT_Error = CANMSG_NO_ERROR;

   // Now re-enable the CAN interrupt / its switch
   CANMSG_INT_Enable = 1;

   // From this point on the system counts as initialised
   CANMSG_RX_State = CANMSG_RX_ENABLED;
   CANMSG_TX_State = CANMSG_TX_ENABLED;

   // Register with the comm module
   Gateway_CAN_CommIndex[Channel] = msgRegister(CAN+Channel, "CAN", Channel, 0, NULL, CAN_Send, NULL);

   return (Error);
}  // end of function CANMSG_Init