static int __init servicer_init(void)
{
	int ret;

	/*
	 * The code below creates the device node automatically, but the node
	 * ends up under /dev rather than /dev/misc.
	 * misc_register() essentially calls register_chrdev() with major number 10;
	 * a misc device is just a special kind of character device.
	 * Registering the driver with misc_register() creates the device node
	 * (device file) automatically, so no mknod is needed, because
	 * misc_register() calls class_device_create() or device_create().
	 */
	ret = misc_register(&misc);
	lidbg(DEVICE_NAME"servicer_init\n");

	//DECLARE_KFIFO(cmd_fifo);
	//INIT_KFIFO(cmd_fifo);
	lidbg("kfifo_init,FIFO_SIZE=%d\n", FIFO_SIZE);
	kfifo_init(&k2u_fifo, k2u_fifo_buffer, FIFO_SIZE);
	kfifo_init(&u2k_fifo, u2k_fifo_buffer, FIFO_SIZE);
	spin_lock_init(&fifo_k2u_lock);

	CREATE_KTHREAD(thread_u2k, NULL);
	lidbg_chmod("/dev/lidbg_servicer");
	LIDBG_MODULE_LOG;
	return ret;
}
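/*
 * A minimal sketch of the matching exit path, assuming servicer_init() is
 * hooked up with module_init() elsewhere and that the static FIFOs and the
 * kthread need no explicit teardown here; only misc_deregister(), undoing
 * the misc_register() call above, is taken from the code shown.
 */
static void __exit servicer_exit(void)
{
	misc_deregister(&misc);		/* removes the auto-created /dev node */
}
module_exit(servicer_exit);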
static void console_init_early(void)
{
	kfifo_init(console_input_fifo, console_input_buffer, CONSOLE_BUFFER_SIZE);
	kfifo_init(console_output_fifo, console_output_buffer, CONSOLE_BUFFER_SIZE);
	initialized = CONSOLE_INITIALIZED_BUFFER;
}
static void usart1_fifo_init(void)
{
	kfifo_init(&rx_fifo, rx_buff, MAX_RX_BUFF_SIZE);
#if (USART1_TX_MODE == TX_IRQ)
	/* The TX FIFO is only used in interrupt-driven transmit mode. */
	kfifo_init(&tx_fifo, tx_buff, MAX_TX_BUFF_SIZE);
#elif (USART1_TX_MODE == TX_DMA)
#endif
}
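/*
 * Sketch of how rx_fifo could be filled from the receive interrupt; the
 * USART1->SR / USART1->DR register names are STM32-style assumptions and
 * kfifo_in() assumes this embedded port mirrors the kernel kfifo API.
 */
void USART1_IRQHandler(void)
{
	uint8_t ch;

	if (USART1->SR & USART_SR_RXNE) {	/* a byte has arrived */
		ch = (uint8_t)USART1->DR;	/* reading DR clears RXNE */
		kfifo_in(&rx_fifo, &ch, 1);	/* silently drops the byte if the FIFO is full */
	}
}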
/**
 * kfifo_alloc - allocates a new FIFO and its internal buffer
 * @size: the size of the internal buffer to be allocated.
 * @gfp_mask: get_free_pages mask, passed to kmalloc()
 * @lock: the lock to be used to protect the fifo buffer
 *
 * The size will be rounded-up to a power of 2.
 */
struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
{
	unsigned char *buffer;
	struct kfifo *ret;

	/*
	 * round up to the next power of 2, since our 'let the indices
	 * wrap' technique works only in this case.
	 */
	if (size & (size - 1)) {
		BUG_ON(size > 0x80000000);
		size = roundup_pow_of_two(size);
	}

	buffer = kmalloc(size, gfp_mask);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	ret = kfifo_init(buffer, size, gfp_mask, lock);
	if (IS_ERR(ret))
		kfree(buffer);

	return ret;
}
/**
 * kfifo_alloc - allocates a new FIFO and its internal buffer
 * @size: the size of the internal buffer to be allocated.
 *
 * The size will be rounded-up to a power of 2.
 */
struct kfifo *kfifo_alloc(unsigned int size)
{
	unsigned char *buffer;
	struct kfifo *fifo;

	/*
	 * round up to the next power of 2, since our 'let the indices
	 * wrap' technique works only in this case.
	 */
	if (size & (size - 1)) {
		BUG_ON(size > 0x80000000);
		size = roundup_pow_of_two(size);
	}

	buffer = malloc(size);
	if (!buffer)
		return NULL;

	fifo = malloc(sizeof(struct kfifo));
	if (!fifo) {
		free(buffer);
		return NULL;
	}

	kfifo_init(fifo, buffer, size);

	return fifo;
}
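/*
 * Since this userspace port allocates the buffer and the struct separately,
 * a matching free routine has to release both. Minimal sketch; the field
 * name 'buffer' is an assumption about this port's struct kfifo layout.
 */
void kfifo_free(struct kfifo *fifo)
{
	if (!fifo)
		return;
	free(fifo->buffer);	/* internal buffer allocated in kfifo_alloc() */
	free(fifo);
}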
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	int i;
	struct iu_entry *iue;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
	kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));

	for (i = 0, iue = q->items; i < max; i++) {
		kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

free_pool:
	kfree(q->pool);
	return -ENOMEM;
}
static inline struct kfifo *dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	fifo = kfifo_init(buf, size, flags, lock);
#else
	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */

	return fifo;
}
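/*
 * Matching teardown for dhd_kfifo_init() above; the helper name is an
 * assumption, only kfifo_free()/kfree() are real kernel APIs. Note the
 * asymmetry: on pre-2.6.33 kernels kfifo_free() also releases the buffer
 * handed to kfifo_init(), while on newer kernels the caller keeps ownership
 * of buf and only the kzalloc'd struct needs freeing.
 */
static inline void dhd_kfifo_free(struct kfifo *fifo)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	kfifo_free(fifo);
#else
	kfree(fifo);
#endif
}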
int scull_kfifo_init_module(void)
{
	int result;
	dev_t dev = 0;

	/*
	 * Get a range of minor numbers to work with, asking for a dynamic
	 * major unless directed otherwise at load time.
	 */
	if (scull_kfifo_major) {
		dev = MKDEV(scull_kfifo_major, scull_kfifo_minor);
		result = register_chrdev_region(dev, 1, "scull_kfifo");
	} else {
		result = alloc_chrdev_region(&dev, scull_kfifo_minor, 1, "scull_kfifo");
		scull_kfifo_major = MAJOR(dev);
	}
	if (result < 0) {
		printk(KERN_WARNING "scull: can't get major %d\n", scull_kfifo_major);
		return result;
	}

	/*
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	scull_kfifo_devices = kmalloc(sizeof(struct scull_kfifo), GFP_KERNEL);
	if (!scull_kfifo_devices) {
		result = -ENOMEM;
		goto fail;		/* Make this more graceful */
	}
	memset(scull_kfifo_devices, 0, sizeof(struct scull_kfifo));

	tekkamanbuffer = kmalloc(BUFSIZE, GFP_KERNEL);
	if (!tekkamanbuffer) {
		result = -ENOMEM;
		goto fail;		/* Make this more graceful */
	}

	tekkaman = kmalloc(BUFSIZE, GFP_KERNEL);
	if (!tekkaman) {
		result = -ENOMEM;
		goto fail;		/* Make this more graceful */
	}

	/* Initialize each device. */
	init_MUTEX(&scull_kfifo_devices->sem);
	spin_lock_init(&scull_kfifo_devices->lock);
	scull_kfifo_devices->tekkamankfifo = kfifo_init(tekkamanbuffer, BUFSIZE,
							GFP_KERNEL, &scull_kfifo_devices->lock);
	init_waitqueue_head(&scull_kfifo_devices->inq);
	init_waitqueue_head(&scull_kfifo_devices->outq);
	scull_kfifo_setup_cdev(scull_kfifo_devices);

	return 0;	/* succeed */

fail:
	scull_kfifo_cleanup_module();
	return result;
}
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	int i;
	struct iu_entry *iue;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
	q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
			      GFP_KERNEL, &q->lock);
	if (IS_ERR(q->queue))
		goto free_item;
#else
	kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
#endif

	for (i = 0, iue = q->items; i < max; i++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
		__kfifo_put(q->queue, (void *) &iue, sizeof(void *));
#else
		kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
#endif
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
free_item:
#endif
	kfree(q->items);
free_pool:
	kfree(q->pool);
	return -ENOMEM;
}
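/*
 * Minimal sketch of the matching pool teardown, assuming nothing beyond the
 * two kcalloc'd arrays needs releasing (the kfifo only wraps q->pool, so it
 * owns no storage of its own).
 */
static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}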
void demux_reset(demux_t *dm)
{
	demux_ent_t *ent;
	int proc_id;

	cond_init(&dm->next_chunk_cond);

	for (proc_id = 0; proc_id < NUM_CHUNK_PROC; proc_id++) {
		ent = dm->entries + proc_id;
		if (ent->buf == NULL)
			ent->buf = kmalloc(DEMUX_BUF_SIZE, GFP_KERNEL);
		kfifo_init(&ent->fifo, ent->buf, DEMUX_BUF_SIZE);
		cond_init(&ent->fifo_full_cond);
	}
}
/**
 * kfifo_alloc - allocates a new FIFO and its internal buffer
 * @size: the size of the internal buffer to be allocated.
 * @lock: the lock to be used to protect the fifo buffer
 *
 * Note: unlike the in-kernel version, this variant does not round the
 * size up to a power of 2 itself.
 */
struct kfifo *kfifo_alloc(unsigned int size, cvmx_spinlock_t *lock)
{
	unsigned char *buffer;
	struct kfifo *ret;

	buffer = malloc(size);
	if (!buffer)
		return NULL;

	ret = kfifo_init(buffer, size, lock);
	if (IS_ERR(ret))
		free(buffer);

	return ret;
}
/**
 * kfifo_alloc - allocates a new FIFO and its internal buffer
 * @size: the size of the internal buffer to be allocated.
 *
 * Note: unlike the in-kernel version, this variant does not round the
 * size up to a power of 2 itself.
 */
struct kfifo *kfifo_alloc(unsigned int size)
{
	unsigned char *buffer;
	struct kfifo *ret;

	buffer = malloc(size);
	if (!buffer)
		return NULL;

	ret = kfifo_init(buffer, size);
	if (!ret)
		free(buffer);

	return ret;
}
int mt76x2_register_device(struct mt76x2_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct wiphy *wiphy = hw->wiphy;
	void *status_fifo;
	int fifo_size;
	int i, ret;

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);

	mt76x2_init_device(dev);

	ret = mt76x2_init_hardware(dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
		u8 *addr = dev->macaddr_list[i].addr;

		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);

		if (!i)
			continue;

		addr[0] |= BIT(1);
		addr[0] ^= ((i - 1) << 2);
	}
	wiphy->addresses = dev->macaddr_list;
	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);

	wiphy->iface_combinations = if_comb;
	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);

	wiphy->reg_notifier = mt76x2_regd_notifier;

	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	mt76x2_dfs_init_detector(dev);

	/* init led callbacks */
	dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
	dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;

	ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
				   ARRAY_SIZE(mt76x2_rates));
	if (ret)
		goto fail;

	mt76x2_init_debugfs(dev);
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

	return 0;

fail:
	mt76x2_stop_hardware(dev);
	return ret;
}
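/*
 * The status FIFO initialised above is typically filled in the TX interrupt
 * path and drained later in softirq context. A hedged sketch of a consumer
 * loop, assuming txstatus_fifo is declared with
 * DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status), as the size
 * computation suggests; process_tx_status() is a hypothetical per-record
 * handler, only kfifo_get() is the real kernel API.
 */
static void mt76x2_drain_tx_status(struct mt76x2_dev *dev)
{
	struct mt76x2_tx_status stat;

	while (kfifo_get(&dev->txstatus_fifo, &stat))	/* returns 0 once empty */
		process_tx_status(dev, &stat);
}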
static int __init p_serial_init(void)
{
	printk("1>alloc_chrdev_region(...!\n");
	if (alloc_chrdev_region(&p_device_id, 0, 1, "pseudo_serial_driver")) {
		printk("Error in allocating the serial device driver region..!\n");
		return -EINVAL;
	}

	printk("2>request_region(...!\n");
	rs = request_region(base_addr, NO_OF_PORTS, "pseudo_serial_device0");
	if (rs == NULL) {
		unregister_chrdev_region(p_device_id, 1);
		return -EBUSY;
	}

	printk("3>my_dev = kzalloc...!\n");
	my_dev = kmalloc(sizeof(P_SERIAL_DEV), GFP_KERNEL);
	if (my_dev == NULL) {
		printk("Error in creating a private object...!\n");
		unregister_chrdev_region(p_device_id, 1);
		release_region(base_addr, NO_OF_PORTS);
		return -EBUSY;
	}

	printk("4>my_dev->read_buff = kzalloc(...!\n");
	my_dev->read_buff = kmalloc(MAX_BUFFER_AREA, GFP_KERNEL);
	if (my_dev->read_buff == NULL) {
		printk("error in read_buff's memory allocation...\n");
		unregister_chrdev_region(p_device_id, 1);
		kfree(my_dev);
		release_region(base_addr, NO_OF_PORTS);
		return -ENOMEM;
	}

	printk("5>kfifo_init(&(my_dev->read_kfifo)...!\n");
	kfifo_init(&(my_dev->read_kfifo), my_dev->read_buff, (unsigned int) MAX_BUFFER_AREA);

	printk("6>my_dev->write_buff = kzalloc(...!\n");
	my_dev->write_buff = kmalloc(MAX_BUFFER_AREA, GFP_KERNEL);
	if (my_dev->write_buff == NULL) {
		printk("error in write_buff's memory allocation...\n");
		unregister_chrdev_region(p_device_id, 1);
		kfifo_free(&(my_dev->read_kfifo));
		kfree(my_dev);
		release_region(base_addr, NO_OF_PORTS);
		return -ENOMEM;
	}

	printk("7>kfifo_init(&(my_dev->write_kfifo)...!\n");
	kfifo_init(&(my_dev->write_kfifo), my_dev->write_buff, (unsigned int) MAX_BUFFER_AREA);

	printk("8>spin_lock...!\n");
	spin_lock_init(&(my_dev->wr_spinlock));
	spin_lock_init(&(my_dev->rd_spinlock));

	printk("9>init_waitqueue...!\n");
	init_waitqueue_head(&(my_dev->read_queue));
	init_waitqueue_head(&(my_dev->write_queue));

	printk("10>Device ID is %u (MAJOR:%d,Minor:%d)\n", p_device_id,
	       MAJOR(p_device_id), MINOR(p_device_id));

	cdev_init(&my_dev->cdev, &p_device_fops);

	printk("11>kobject_set_name(...!\n");
	kobject_set_name(&(my_dev->cdev.kobj), "p_serial_dev0");

	printk("12>: my_dev->cdev.ops\n");
	my_dev->cdev.ops = &p_device_fops;

	printk("13>: cdev_add(\n");
	if ((cdev_add(&my_dev->cdev, p_device_id, 1)) < 0) {
		printk("Error in adding the serial device to the driver region..!\n");
		// kobject_put(&(my_dev->cdev.kobj));
		unregister_chrdev_region(p_device_id, 1);
		kfifo_free(&(my_dev->read_kfifo));
		kfifo_free(&(my_dev->write_kfifo));
		kfree(my_dev);
		release_region(base_addr, NO_OF_PORTS);
		return -EINVAL;
	}

	return 0;
}
int mt76_register_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;
	struct wiphy *wiphy = hw->wiphy;
	void *status_fifo;
	int fifo_size;
	int i, ret;

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76_tx_status));
	status_fifo = devm_kzalloc(dev->dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	ret = mt76_init_hardware(dev);
	if (ret)
		return ret;

	SET_IEEE80211_DEV(hw, dev->dev);
	hw->queues = 4;
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_PS_NULLFUNC_STACK |
		    IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_SUPPORTS_RC_TABLE;
	hw->max_rates = 1;
	hw->max_report_rates = 7;
	hw->max_rate_tries = 1;

	hw->sta_data_size = sizeof(struct mt76_sta);
	hw->vif_data_size = sizeof(struct mt76_vif);
	hw->txq_data_size = sizeof(struct mt76_txq);

	dev->macaddr[0] &= ~BIT(1);
	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);

	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
		u8 *addr = dev->macaddr_list[i].addr;

		memcpy(addr, dev->macaddr, ETH_ALEN);

		if (!i)
			continue;

		addr[0] |= BIT(1);
		addr[0] ^= ((i - 1) << 2);
	}
	wiphy->addresses = dev->macaddr_list;
	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;

	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	wiphy->iface_combinations = if_comb;
	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);

	ret = mt76_init_sband_2g(dev);
	if (ret)
		goto fail;

	ret = mt76_init_sband_5g(dev);
	if (ret)
		goto fail;

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_DELAYED_WORK(&dev->cal_work, mt76_phy_calibrate);
	INIT_DELAYED_WORK(&dev->mac_work, mt76_mac_work);

	ret = ieee80211_register_hw(hw);
	if (ret)
		goto fail;

	mt76_init_debugfs(dev);

	return 0;

fail:
	mt76_stop_hardware(dev);
	return ret;
}
static int __init pseudo_init(void)
{
	int i, retval = 0;

	printk("we are in init function \n");

	i = alloc_chrdev_region(&pdevice, 0, 1, "pseudo");	/* for ndevices.. */
	if (i < 0) {
		printk("Error in device creating.....\n");
		return -EBUSY;
	}

	my_dev = kmalloc(sizeof(c_dev), GFP_KERNEL);
	if (my_dev == NULL) {
		printk("error in creating devices\n");
		unregister_chrdev_region(pdevice, 1);
		return -ENOMEM;
	}
	// list_add_tail(&my_dev->list,&dev_list); --- does not require this list

	//---------------------------------------------------------------------
	my_dev->my_kobj = kobject_create_and_add("kobject_example", kernel_kobj);
	if (!(my_dev->my_kobj))
		return -ENOMEM;

	retval = sysfs_create_group(my_dev->my_kobj, &attr_group);
	if (retval)
		kobject_put(my_dev->my_kobj);
	//----------------------------------------------------------------------

	my_dev->buff = kmalloc(MAX_BUFFSIZE, GFP_KERNEL);	/* creating memory for buffer in kernel */
	if (my_dev->buff == NULL) {
		kfree(my_dev);
		unregister_chrdev_region(pdevice, 1);
		return -ENOMEM;
	}

	kfifo_init(&(my_dev->kfifo), my_dev->buff, MAX_BUFFSIZE);	/* ??? */

	cdev_init(&my_dev->cdev, &device_fops);			/* device operations */
	kobject_set_name(&(my_dev->cdev.kobj), "device0");	/* increment the reference count */
	my_dev->cdev.ops = &device_fops;

	if (cdev_add(&my_dev->cdev, pdevice, 1) < 0) {
		printk("error in adding char device\n");
		kobject_put(&(my_dev->cdev.kobj));	/* decrements the reference count & frees the object */
		kfifo_free(&my_dev->kfifo);
		//kfree(my_dev->buff);	//????
		kfree(my_dev);
		unregister_chrdev_region(pdevice, 1);
		return -EBUSY;
	}

	printk(KERN_INFO "device has been loaded\n");
	return 0;
}
int mt76x2_register_device(struct mt76x2_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct ieee80211_supported_band *sband;
	struct wiphy *wiphy = hw->wiphy;
	void *status_fifo;
	int fifo_size;
	int i, ret;

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	ret = mt76x2_init_hardware(dev);
	if (ret)
		return ret;

	hw->queues = 4;
	hw->max_rates = 1;
	hw->max_report_rates = 7;
	hw->max_rate_tries = 1;
	hw->extra_tx_headroom = 2;

	hw->sta_data_size = sizeof(struct mt76x2_sta);
	hw->vif_data_size = sizeof(struct mt76x2_vif);

	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
		u8 *addr = dev->macaddr_list[i].addr;

		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);

		if (!i)
			continue;

		addr[0] |= BIT(1);
		addr[0] ^= ((i - 1) << 2);
	}
	wiphy->addresses = dev->macaddr_list;
	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);

	wiphy->iface_combinations = if_comb;
	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);

	dev->mt76.sband_2g.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
	dev->mt76.sband_5g.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;

	ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
				   ARRAY_SIZE(mt76x2_rates));
	if (ret)
		goto fail;

	sband = wiphy->bands[dev->mt76.cap.has_5ghz ?
			     IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ];
	dev->chandef.chan = &sband->channels[0];

	mt76x2_init_debugfs(dev);

	return 0;

fail:
	mt76x2_stop_hardware(dev);
	return ret;
}