int srb_http_get_status(char *buf, int len, enum srb_http_statuscode *code) { int ret; long status; char codebuf[8]; int codelen = 0; if (!strncmp(buf, HTTP_VER, strlen(HTTP_VER))) { buf = buf + strlen(HTTP_VER); while (*buf != 0 && *buf == ' ') ++buf; while (codelen < sizeof(codebuf) - 1 && buf[codelen] != 0 && buf[codelen] >= '0' && buf[codelen] <= '9') { codebuf[codelen] = buf[codelen]; codelen++; } codebuf[codelen] = 0; ret = kstrtol(codebuf, 10, &status); if (ret != 0) { SRB_LOG_ERR(srb_log, "Could not retrieve HTTP status code: err %i (buf=%.*s)", ret, 5, buf); return -1; } // Known codes have their values fixed to the enum, so keep them // Otherwise, consider it as an unknown extension. switch (status) { case 100: case 101: case 200: case 201: case 202: case 203: case 204: case 205: case 206: case 300: case 301: case 302: case 303: case 304: case 305: case 307: case 400: case 401: case 402: case 403: case 404: case 405: case 40 : case 407: case 408: case 409: case 410: case 411: case 412: case 413: case 414: case 415: case 416: case 417: case 500: case 501: case 502: case 503: case 504: case 505: *code = status; break ; default: *code = SRB_HTTP_STATUS_EXTENSION; break ; } return 0; } return -1; }
static int human_to_bytes(char *size_str, unsigned long long *size) { char h; unsigned long long coef; int ret; //printk(KERN_DEBUG "DEBUG: human_to_bytes: buff: %s\n", size_str); coef = 1; h = size_str[strlen(size_str) - 1]; /* get human format if any and set coeff */ switch (h) { case 'G': coef = GB; size_str[strlen(size_str) - 1] = '\0'; break; case 'M': coef = MB; size_str[strlen(size_str) - 1] = '\0'; break; case 'k': coef = kB; size_str[strlen(size_str) - 1] = '\0'; break; default: coef = 1; } /* calculate size */ ret = kstrtoull(size_str, 10, size); if (ret != 0) { SRB_LOG_ERR(srb_log, "Invalid volume size %s (%llu) (ret: %d)", size_str, *size, ret); return -EINVAL; } *size = *size * coef; return 0; }
/* This function initializes a new device structure:
** sets its name and allocates the per-thread CDMI descriptor and
** kthread pointer arrays.
** Note : all the remaining fields states are undefined, it is
** the caller responsibility to set them.
**
** Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on
** allocation failure (partially allocated resources are released).
*/
static int srb_device_new(const char *devname, srb_device_t *dev)
{
	int ret = -EINVAL;
	int i;

	SRB_LOG_INFO(srb_log, "srb_device_new: creating new device %s"
		     " with %d threads", devname, thread_pool_size);

	if (NULL == dev) {
		ret = -EINVAL;
		goto out;
	}
	if (NULL == devname || strlen(devname) >= DISK_NAME_LEN) {
		SRB_LOG_ERR(srb_log, "srb_device_new: "
			    "Invalid (or too long) device name '%s'",
			    devname == NULL ? "" : devname);
		ret = -EINVAL;
		goto out;
	}

	dev->debug.name = &dev->name[0];
	dev->debug.level = srb_log;
	dev->users = 0;
	/* BUGFIX: strncpy(dev->name, devname, strlen(devname)) never wrote
	 * a NUL terminator; the length was validated above so the
	 * terminator is guaranteed to fit in DISK_NAME_LEN */
	memcpy(dev->name, devname, strlen(devname) + 1);

	/* XXX: dynamic allocation of thread pool and cdmi connection pool
	 * NB: The memory allocation for the thread is an array of pointer
	 * whereas the allocation for the cdmi connection pool is an array
	 * of cdmi connection structure */
	dev->thread_cdmi_desc = vmalloc(thread_pool_size * sizeof(struct srb_cdmi_desc_s *));
	if (dev->thread_cdmi_desc == NULL) {
		SRB_LOG_CRIT(srb_log, "srb_device_new: Unable to allocate memory for CDMI struct pointer");
		ret = -ENOMEM;
		goto err_mem;
	}
	/* BUGFIX: vmalloc does not zero the memory; clear the slots so the
	 * err_mem path never vfree()s an uninitialized pointer when an
	 * element allocation fails partway through the loop below */
	memset(dev->thread_cdmi_desc, 0,
	       thread_pool_size * sizeof(struct srb_cdmi_desc_s *));

	for (i = 0; i < thread_pool_size; i++) {
		dev->thread_cdmi_desc[i] = vmalloc(sizeof(struct srb_cdmi_desc_s));
		if (dev->thread_cdmi_desc[i] == NULL) {
			SRB_LOG_CRIT(srb_log, "srb_device_new: Unable to allocate memory for CDMI struct, step %d", i);
			ret = -ENOMEM;
			goto err_mem;
		}
	}

	dev->thread = vmalloc(thread_pool_size * sizeof(struct task_struct *));
	if (dev->thread == NULL) {
		SRB_LOG_CRIT(srb_log, "srb_device_new: Unable to allocate memory for kernel thread struct");
		ret = -ENOMEM;
		goto err_mem;
	}

	return 0;

err_mem:
	if (NULL != dev && NULL != dev->thread_cdmi_desc) {
		for (i = 0; i < thread_pool_size; i++) {
			if (dev->thread_cdmi_desc[i])
				vfree(dev->thread_cdmi_desc[i]);
		}
		vfree(dev->thread_cdmi_desc);
		dev->thread_cdmi_desc = NULL;
	}
out:
	return ret;
}
static int srb_init_disk(struct srb_device_s *dev) { struct gendisk *disk = NULL; struct request_queue *q; int i; int ret = 0; SRB_LOG_INFO(srb_log, "srb_init_disk: initializing disk for device: %s", dev->name); /* create gendisk info */ disk = alloc_disk(DEV_MINORS); if (!disk) { SRB_LOG_WARN(srb_log, "srb_init_disk: unable to allocate memory for disk for device: %s", dev->name); return -ENOMEM; } SRB_LOG_DEBUG(srb_log, "Creating new disk: %p", disk); strcpy(disk->disk_name, dev->name); disk->major = dev->major; disk->first_minor = 0; disk->fops = &srb_fops; disk->private_data = dev; /* init rq */ q = blk_init_queue(srb_rq_fn, &dev->rq_lock); if (!q) { SRB_LOG_WARN(srb_log, "srb_init_disk: unable to init block queue for device: %p, disk: %p", dev, disk); srb_free_disk(dev); return -ENOMEM; } blk_queue_max_hw_sectors(q, DEV_NB_PHYS_SEGS); q->queuedata = dev; dev->disk = disk; dev->q = disk->queue = q; dev->nb_threads = 0; //blk_queue_flush(q, REQ_FLUSH | REQ_FUA); //blk_queue_max_phys_segments(q, DEV_NB_PHYS_SEGS); //TODO: Enable flush and bio (Issue #21) //blk_queue_flush(q, REQ_FLUSH); for (i = 0; i < thread_pool_size; i++) { //if ((ret = srb_cdmi_connect(&dev->debug, &dev->thread_cdmi_desc[i]))) { if ((ret = srb_cdmi_connect(&dev->debug, dev->thread_cdmi_desc[i]))) { SRB_LOG_ERR(srb_log, "Unable to connect to CDMI endpoint: %d", ret); srb_free_disk(dev); return -EIO; } } /* Caution: be sure to call this before spawning threads */ ret = srb_cdmi_getsize(&dev->debug, dev->thread_cdmi_desc[0], &dev->disk_size); if (ret != 0) { SRB_LOG_ERR(srb_log, "Could not retrieve volume size."); srb_free_disk(dev); return ret; } set_capacity(disk, dev->disk_size / 512ULL); for (i = 0; i < thread_pool_size; i++) { dev->thread[i] = kthread_create(srb_thread, dev, "%s", dev->disk->disk_name); if (IS_ERR(dev->thread[i])) { SRB_LOG_ERR(srb_log, "Unable to create worker thread (id %d)", i); dev->thread[i] = NULL; srb_free_disk(dev); goto err_kthread; } 
wake_up_process(dev->thread[i]); } add_disk(disk); SRBDEV_LOG_INFO(dev, "Attached volume %s of size 0x%llx", disk->disk_name, (unsigned long long)dev->disk_size); return 0; err_kthread: for (i = 0; i < thread_pool_size; i++) { if (dev->thread[i] != NULL) kthread_stop(dev->thread[i]); } return -EIO; }