Exemple #1
0
/*
** Allocate `taille` bytes from the free list headed at `pstart_free`.
** On success the block is moved to the alloc list headed at `pstart_alloc`
** and a pointer to the payload (just past the header) is returned.
** Returns NULL when no block fits and the heap cannot be grown.
**
** Cleaned up: the original duplicated the "claim the block" sequence in
** two identical else-branches; there is now a single success path.
*/
t_header	*do_malloc(size_t taille, t_header *pstart_free,
 t_header *pstart_alloc)
{
  t_header	*cur;
  unsigned int	nbblocks;

  nbblocks = get_real_size(taille);
  if ((cur = find_freeblock(nbblocks, pstart_free)) == NULL)
  {
    /* Nothing big enough: grow the heap, then retry the search once. */
    if (ask_memory(nbblocks) == 0)
      return (NULL);
    if ((cur = find_freeblock(nbblocks, pstart_free)) == NULL)
      return (NULL);
  }
  cur->size = nbblocks;		/* nbblocks == get_real_size(taille) */
  add_list_alloc(pstart_alloc, cur);
  /* The caller gets the payload, which starts right after the header. */
  return ((t_header *)cur + 1);
}
	inline void clear_unused_portion()
	{
		for ( u32 i=get_real_size(); i<max_size; ++i )
		{
			arr[i] = 0;
		}
	}
/*
** Release every heap chunk owned by `pid`.
** Bug fixed: the original inspected only the FIRST chunk; the trailing
** `chunk += get_real_size(s)` advanced the pointer and then fell off the
** end of the function, so later chunks belonging to the pid were never
** freed. The walk now covers the whole heap up to MEMORY_END.
*/
void kfree_pid (u16 pid) {
    Kmalloc_header *chunk = MEMORY_START;

    while ((void *)chunk < MEMORY_END) {
        /* Capture the size before kfree(): freeing may coalesce the chunk
         * with its neighbours and rewrite chunk->size. */
        u16 s = chunk->size;

        if (chunk->user == pid) {
            kfree (chunk);
        }

        chunk += get_real_size(s);
    }
}
Exemple #4
0
/*
** Resize the allocation at `ptr` to `taille` bytes.
**   ptr == NULL      -> behaves like mymalloc(taille)
**   taille == 0      -> frees ptr and returns NULL
**   grow             -> new block, payload copied, old block freed
**   same unit count  -> ptr returned unchanged
**   shrink           -> block split in place, tail freed
** A live allocated block carries the 0x42 magic in its `next` field;
** anything else is rejected with NULL.
*/
void	*myrealloc(void *ptr, size_t taille)
{
  t_header        *header_bloc;
  t_header        *header_newbloc;
  int             old_size;
  unsigned int    nunites;

  if (ptr == NULL)
    return (mymalloc(taille));
  header_bloc = (t_header *)ptr - 1;
  if (header_bloc->next != (t_header *)0x42)
    return (NULL);
  if (taille == 0)
    {
      myfree(ptr);
      return (NULL);
    }
  nunites = get_real_size(taille);
  old_size = (header_bloc->size - 1);
  if (nunites > header_bloc->size)
    {
      if ((header_newbloc = mymalloc(taille)) == NULL)
        return (NULL);
      /* mymalloc() already returns the payload address, so copy straight
       * into header_newbloc; the old payload starts after its header. */
      memcpy((void *)header_newbloc,
             (void *)((t_header *)header_bloc + 1), old_size * SIZE_BLOC);
      myfree((void *)((t_header *)header_bloc + 1));
      return ((void *)header_newbloc);
    }
  if (nunites == header_bloc->size)
    return (ptr);
  /* Shrink: carve the unused tail off into its own block and free it. */
  header_newbloc = (t_header *)header_bloc + nunites;
  header_newbloc->size = header_bloc->size - nunites;
  /* Bug fixed: the magic must be stamped on the NEW tail block so the
   * 0x42 check (mirroring the one at the top of this function) accepts
   * it in myfree(); the original re-stamped header_bloc, which was
   * already tagged, and never shrank header_bloc->size. */
  header_newbloc->next = (t_header *)0x42;
  header_bloc->size = nunites;
  myfree((void *)((t_header *)header_newbloc + 1));
  return (ptr);
}
Exemple #5
0
/*
** Public allocator entry point: returns a payload pointer for `taille`
** bytes, or NULL on taille == 0 or out-of-memory. Lazily initializes the
** circular free/alloc list heads on first call.
**
** Cleaned up: the original duplicated the "claim the block" sequence in
** two branches that even ordered cur->size / add_list_alloc differently;
** there is now a single success path (size set first, matching do_malloc).
*/
void		*mymalloc(size_t taille)
{
  unsigned  int	nbblocks;
  t_header	*cur;

  cpt++;			/* allocation counter (debug/statistics) */
  nbblocks = get_real_size(taille);
  if (taille == 0)		/* size_t is unsigned: `<= 0` means `== 0` */
    return (NULL);
  if (!start)
    {
      /* One-time init: both list heads point at themselves (empty rings). */
      start_free.next = &start_free;
      start_free.size = 0;
      start_alloc.next = &start_alloc;
      start_alloc.size = 0;
      start = 1;
      min_addr = sbrk(0);
    }
  if ((cur = find_freeblock(nbblocks)) == NULL)
    {
      /* Nothing big enough: grow the heap, then retry the search once. */
      if (ask_memory(nbblocks) == 0)
	return (NULL);
      if ((cur = find_freeblock(nbblocks)) == NULL)
	return (NULL);
    }
  cur->size = nbblocks;		/* nbblocks == get_real_size(taille) */
  add_list_alloc(&start_alloc, cur);
  /* The caller gets the payload, which starts right after the header. */
  return ((t_header *)cur + 1);
}
/* Return `addr` to the allocator and coalesce with any free chunks that
** immediately follow it.
** Bug fixed: the original do/while read other->user BEFORE checking
** other < MEMORY_END, dereferencing past the heap whenever the freed
** chunk was the last one.
** NOTE(review): free_mem is decremented in kmalloc() but never restored
** here -- the accounting appears to leak; confirm against the module.
*/
void kfree (void *addr) {
    if (addr < MEMORY_START || addr > MEMORY_END) {
        return;
    }
    Kmalloc_header *chunk = get_header(addr);
    chunk->user = MEMORY_OWNER_FREE;

    /* Absorb each immediately-following free chunk; after a merge,
     * next(chunk) lands on the chunk past the absorbed one. */
    Kmalloc_header *other = next(chunk);
    while ((void *)other < MEMORY_END && other->user == MEMORY_OWNER_FREE) {
        chunk->size += get_real_size(other->size);
        other = next(chunk);
    }
}
/* First-fit allocation of `size` units for `owner`. Panics when no chunk
** fits. The returned payload is zeroed.
** Bug fixed: the original subtracted from free_mem BEFORE testing for
** space, and compared the raw `size` against the already-decremented
** counter, so the unsigned counter could wrap below zero. Check first,
** then commit.
*/
void *kmalloc (u16 owner, u16 size) {

    u16 real_size = get_real_size(size);

    if (real_size > free_mem) {
        kpanic ("Out of memory");
    }
    free_mem -= real_size;

    Kmalloc_header *chunk = MEMORY_START;
    while (true) {
        if ((void *)chunk >= MEMORY_END) {
            kpanic ("Out of memory");
        }

        if (chunk->user == MEMORY_OWNER_FREE && chunk->size == real_size) {
            /* Exact fit: claim the chunk as-is. */
            chunk->user = owner;
            break;
        } else if (chunk->user == MEMORY_OWNER_FREE && chunk->size > real_size) {
            /* Split: write a free header for the remainder right after
             * our payload, then claim the front part.
             * NOTE(review): assumes data[] elements are wide enough to
             * carry the user/size header fields -- confirm the
             * Kmalloc_header layout before touching this. */
            chunk->data[size] = MEMORY_OWNER_FREE;
            chunk->data[size + 1] = chunk->size - real_size;

            /* And now it's ours */
            chunk->user = owner;
            chunk->size = size;

            break;
        }

        chunk = next(chunk);
    }

    /* Hand the caller a zeroed payload. */
    for (u16 i = 0; i < size; ++i) {
        chunk->data[i] = 0;
    }

    return chunk->data;
}
Exemple #8
0
/* Attach (read side) to the existing shared-memory segment identified by
 * `id`, mapping get_real_size(size) bytes read/write. Returns a freshly
 * allocated descriptor, or NULL on any failure; the shm fd is always
 * closed before returning (the mapping keeps the segment alive).
 */
bg_shm_t * bg_shm_alloc_read(int id, int size)
  {
  void * addr;
  bg_shm_t * ret = NULL;
  int shm_fd;
  char name[SHM_NAME_MAX];
  int real_size = get_real_size(size);
  
  gen_name(id, name);
  
  shm_fd = shm_open(name, O_RDWR, 0);
  if(shm_fd < 0)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "shm_open of %s failed: %s", name, strerror(errno));
    goto fail;
    }
  if((addr = mmap(0, real_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                  shm_fd, 0)) == MAP_FAILED)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "mmap failed: %s", strerror(errno));
    goto fail;
    }
  
  /* Bug fixed: calloc() was unchecked; a NULL return crashed below. */
  if(!(ret = calloc(1, sizeof(*ret))))
    {
    munmap(addr, real_size);
    goto fail;
    }
  ret->addr = addr;
  ret->size = size;
  /* The refcounter lives inside the segment, after the aligned payload.
     Cast through char* for well-defined pointer arithmetic (arithmetic on
     void* is a GCC extension). */
  ret->rc = (refcounter_t*)((char*)ret->addr + align_size(size));
  ret->id = id;

  bg_log(BG_LOG_DEBUG, LOG_DOMAIN,
         "created shm segment (read) %s", name);

  fail:
  if(shm_fd >= 0)
    close(shm_fd);
  
  return ret;
  }
Exemple #9
0
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct mlx4_en_priv *priv = netdev_priv(dev);
	union mlx4_wqe_qpn_vlan	qpn_vlan = {};
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_info *tx_info;
	int tx_ind;
	int nr_txbb;
	int desc_size;
	int real_size;
	u32 index, bf_index;
	__be32 op_own;
	int lso_header_size;
	void *fragptr = NULL;
	bool bounce = false;
	bool send_doorbell;
	bool stop_queue;
	bool inline_ok;
	u8 data_offset;
	u32 ring_cons;
	bool bf_ok;

	tx_ind = skb_get_queue_mapping(skb);
	ring = priv->tx_ring[TX][tx_ind];

	if (unlikely(!priv->port_up))
		goto tx_drop;

	/* fetch ring->cons far ahead before needing it to avoid stall */
	ring_cons = READ_ONCE(ring->cons);

	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
				  &inline_ok, &fragptr);
	if (unlikely(!real_size))
		goto tx_drop_count;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size >> LOG_TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop_count;
	}

	bf_ok = ring->bf_enabled;
	if (skb_vlan_tag_present(skb)) {
		u16 vlan_proto;

		qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
		vlan_proto = be16_to_cpu(skb->vlan_proto);
		if (vlan_proto == ETH_P_8021AD)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
		else if (vlan_proto == ETH_P_8021Q)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
		else
			qpn_vlan.ins_vlan = 0;
		bf_ok = false;
	}

	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32)(ring->prod - ring_cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
		bf_ok = false;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	if (!lso_header_size) {
		data = &tx_desc->data;
		data_offset = offsetof(struct mlx4_en_tx_desc, data);
	} else {
Exemple #10
0
/* Create (write side) a new shared-memory segment of
 * get_real_size(size) bytes under a generated, collision-free name, and
 * initialize the process-shared refcounter mutex inside it.
 * Returns the descriptor on success, NULL on failure; the shm fd is
 * always closed before returning.
 */
bg_shm_t * bg_shm_alloc_write(int size)
  {
  int shm_fd = -1;
  void * addr;
  bg_shm_t * ret = NULL;
  char name[SHM_NAME_MAX];
  int id = 0;
  pthread_mutexattr_t attr;
  int real_size = get_real_size(size);
  
  /* Probe ids until shm_open with O_EXCL wins the name. */
  while(1)
    {
    id++;
    
    gen_name(id, name);
    
    if((shm_fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL,
                          S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) < 0)
      {
      if(errno != EEXIST)
        {
        bg_log(BG_LOG_ERROR, LOG_DOMAIN,
               "shm_open of %s failed: %s", name, strerror(errno));
        return NULL;
        }
      }
    else
      break;
    }
  
  if(ftruncate(shm_fd, real_size))
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "ftruncate failed: %s", strerror(errno));
    goto fail;
    }

  /* Bug fixed: on mmap failure the original returned NULL directly,
     leaking shm_fd; route through the common cleanup instead. */
  if((addr = mmap(0, real_size, PROT_READ | PROT_WRITE,
                  MAP_SHARED, shm_fd, 0)) == MAP_FAILED)
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "mmap failed: %s", strerror(errno));
    goto fail;
    }

  /* Bug fixed: calloc() was unchecked; a NULL return crashed below. */
  if(!(ret = calloc(1, sizeof(*ret))))
    {
    munmap(addr, real_size);
    goto fail;
    }
  ret->addr = addr;
  ret->size = size;
  ret->id = id;
  ret->wr = 1;
  /* The refcounter lives inside the segment, after the aligned payload. */
  ret->rc = (refcounter_t*)((char*)ret->addr + align_size(size));

  /* Initialize process shared mutex */

  pthread_mutexattr_init(&attr);
  if(pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED))
    {
    bg_log(BG_LOG_ERROR, LOG_DOMAIN,
           "cannot create process shared mutex: %s", strerror(errno));
    /* Bug fixed: the original fell through to `fail` with ret set and
       returned a half-initialized segment; tear everything down. */
    pthread_mutexattr_destroy(&attr);
    munmap(addr, real_size);
    free(ret);
    ret = NULL;
    goto fail;
    }
  pthread_mutex_init(&ret->rc->mutex, &attr);
  pthread_mutexattr_destroy(&attr);	/* attr no longer needed once copied */

  bg_log(BG_LOG_DEBUG, LOG_DOMAIN,
         "created shm segment (write) %s", name);

  ret->rc->refcount = 0;
  fail:
  
  close(shm_fd);
  
  return ret;
  }