Ejemplo n.º 1
0
/**
 * slabs_init() - Init the slab allocator subsystem
 * @limit	: the limit on no. of bytes to allocate, 0 if no limit.
 * @factor_nume	: numerator of the growth factor; each slab class uses a
 * 		  chunk size equal to the previous class's chunk size times
 * 		  factor_nume / factor_deno.
 * @factor_deno	: denominator of the growth factor.
 * @prealloc	: specifies if the slab allocator should allocate all memory
 * 		  up front (if true), or allocate memory in chunks as it is 
 * 		  needed (if false).
 *
 * Returns zero on success, errno otherwise.
 */
int slabs_init(size_t limit, int factor_nume, int factor_deno, bool prealloc)
{
	int ret = 0;
	int i = POWER_SMALLEST - 1;
	/* smallest chunk: item header plus the configured minimum payload */
	unsigned int size = sizeof(item) + settings.chunk_size;

	/* total bytes that slabs could use */
	if (unlikely(!slabsize))
		return -EINVAL;
	/* NOTE(review): the formula treats slabsize as a percentage of
	 * total RAM and converts it to bytes — confirm at its declaration */
	slabsize = (slabsize * totalram_pages * PAGE_SIZE) / 100;
	if (limit > slabsize) {
		PRINTK("slabs memory limit from %zu to %lu bytes\n", limit, slabsize);
		limit = slabsize;
	}
	/* limit was already clamped above, so this min() is redundant but
	 * harmless */
	mem_limit = min((unsigned long)limit, slabsize);

	if (prealloc) {
		/* try to grab the whole arena in one chunk; on failure fall
		 * back to allocating slab pages on demand */
		mem_base = vmalloc(mem_limit);
		if (!mem_base) {
			PRINTK("Warning: Failed to allocate requested memory in "
			       "one large chunk. \nWill allocate in smaller chunks.\n");
		} else {
			mem_current = mem_base;
			mem_avail = mem_limit;
		}
	}

	/* build slab classes with geometrically growing chunk sizes while
	 * size * factor_nume / factor_deno stays within item_size_max */
	while (++i < POWER_LARGEST && size <= settings.item_size_max * factor_deno / factor_nume) {
		/* Make sure items are always n-byte aligned */
		if (size % CHUNK_ALIGN_BYTES)
			size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);

		init_buffer(&slabclass[i].slab_list);
		slabclass[i].size = size;
		slabclass[i].perslab = settings.item_size_max / slabclass[i].size;
		size = size * factor_nume / factor_deno;

		PVERBOSE(1, "slab class %3d: chunk size %9u perslab %7u\n",
			 i, slabclass[i].size, slabclass[i].perslab);
	}

	/* the largest class holds exactly one item of the maximum size */
	power_largest = i;
	init_buffer(&slabclass[power_largest].slab_list);
	slabclass[power_largest].size = settings.item_size_max;
	slabclass[power_largest].perslab = 1;

	PVERBOSE(1, "slab class %3d: chunk size %9u perslab %7u\n",
		 i, slabclass[i].size, slabclass[i].perslab);

	if (prealloc) {
		ret = mc_slabs_preallocate(power_largest);
	}

	return ret;
}
Ejemplo n.º 2
0
/**
 * grows the hashtable to the next power of 2 
 */
/**
 * mc_hash_expand() - grow the hash table to the next power of 2.
 *
 * Allocates a zeroed table of hashsize(hashpower + 1) buckets and, on
 * success, bumps hashpower, sets EXPANDING and resets expand_bucket so
 * the migration thread can start moving items over.  On allocation
 * failure the previous table is restored and we keep running with it.
 */
static void mc_hash_expand(void)
{
	size_t table_bytes;

	/* remember the current table so we can roll back on failure */
	old_hashtable = primary_hashtable;
	memcpy(&old_hts, &primary_hts, sizeof(old_hts));

	table_bytes = hashsize(hashpower + 1) * sizeof(void *);
	if (alloc_buffer(&primary_hts, table_bytes, __GFP_ZERO)) {
		/* bad news, but we can keep running */
		PRINTK("hash table expansion error\n");
		memcpy(&primary_hts, &old_hts, sizeof(old_hts));
		primary_hashtable = old_hashtable;
		return;
	}

	PVERBOSE(1, "hash table expansion starting\n");
	BUFFER_PTR(&primary_hts, primary_hashtable);
	hashpower++;
	set_bit(EXPANDING, &hashflags);
	expand_bucket = 0;

	/* publish the new level and account for the extra table memory */
	ATOMIC32_SET(stats.hash_power_level, hashpower);
	ATOMIC64_ADD(stats.hash_bytes, table_bytes);
	set_bit(STATS_HASH_EXP, &stats.flags);
}
Ejemplo n.º 3
0
/*
 * mc_slab_rebalance_finish() - complete an in-progress slab page move.
 *
 * Transfers the (now fully evacuated) victim slab page from the source
 * class's slab list to the destination class, wipes the page, carves it
 * into the destination class's freelist, and clears the slab_rebal
 * bookkeeping.  Takes cache_lock then slabs_lock, in the same order as
 * the other rebalance paths in this file.
 */
static void mc_slab_rebalance_finish(void)
{
	slabclass_t *s_cls;
	slabclass_t *d_cls;

	mutex_lock(&cache_lock);
	mutex_lock(&slabs_lock);

	s_cls = &slabclass[slab_rebal.s_clsid];
	d_cls = &slabclass[slab_rebal.d_clsid];

	/* At this point the stolen slab is completely clear */
	/* slab lists are stored as struct buffer entries when each slab is
	 * allocated separately (!mem_base), or as raw pointers when carved
	 * out of one preallocated arena (mem_base set) */
	if (likely(!mem_base)) {
		/* src ---> dst */
		memcpy(&SLABLIST_AS_B(&d_cls->slab_list)[d_cls->slabs++],
		       &SLABLIST_AS_B(&s_cls->slab_list)[s_cls->killing - 1],
		       sizeof(struct buffer));
		/* src ---> new src: fill the vacated slot with the last slab */
		memcpy(&SLABLIST_AS_B(&s_cls->slab_list)[s_cls->killing - 1],
		       &SLABLIST_AS_B(&s_cls->slab_list)[s_cls->slabs - 1],
		       sizeof(struct buffer));
	} else {
		/* src ---> dst */
		SLABLIST_AS_V(&d_cls->slab_list)[d_cls->slabs++] =
			SLABLIST_AS_V(&s_cls->slab_list)[s_cls->killing - 1];
		/* src ---> new src */
		SLABLIST_AS_V(&s_cls->slab_list)[s_cls->killing - 1] =
			SLABLIST_AS_V(&s_cls->slab_list)[s_cls->slabs - 1];
	}
	s_cls->slabs--;
	s_cls->killing = 0;

	/* scrub the page and hand it to the destination class's freelist */
	memset(slab_rebal.slab_start, 0,
	       (size_t)settings.item_size_max);
	mc_split_slab_page_into_freelist(slab_rebal.slab_start,
					 slab_rebal.d_clsid);

	/* reset rebalance state for the next move request */
	slab_rebal.done		= 0;
	slab_rebal.s_clsid	= 0;
	slab_rebal.d_clsid	= 0;
	slab_rebal.slab_start	= NULL;
	slab_rebal.slab_end	= NULL;
	slab_rebal.slab_pos	= NULL;

	/* signal = 0 ends the special lookup mode started by
	 * mc_slab_rebalance_start() (which sets signal = 2) */
	slab_rebal.signal = 0;

	mutex_unlock(&slabs_lock);
	mutex_unlock(&cache_lock);

	clear_bit(STATS_SLAB_RES, &stats.flags);
	ATOMIC64_INC(stats.slabs_moved);

	PVERBOSE(1, "finished a slab move\n");
}
Ejemplo n.º 4
0
//write data into interface
void write_to_interface(char * buffer, int buffer_size)
{
	if (!is_device_ready())
	{
		return;
	}
	
	if (0 == buffer_size)
	{
		return;
	}

	PVERBOSE("write %d byte(s) buffer to cache now\n", buffer_size);
	
	if (down_interruptible(&sem))
	{
		return;
	}

	//we will never let our buffer be full
	//note when cycle_buffer_size - get_used_size() = buffer_size, all buffer will be filled, then read_ptr = write_ptr
	//but we have no idea buffer size is 0 or full when read_ptr = write_ptr, so avoid this states
	if ((cycle_buffer_size - get_used_size()) > buffer_size)
	{
		//buffer is enough
		write_ptr = copy_to_buffer(write_ptr, buffer, buffer_size);
		
		PVERBOSE("write complete, read_ptr: 0x%08x, write_ptr: 0x%08x, used size: %d\n", read_ptr, write_ptr, get_used_size());
	}
	else
	{
		PWARN("failed while write to interface, buffer is full, used size: %d, need: %d\n", get_used_size(), buffer_size);
	}
	
	up(&sem);
}
Ejemplo n.º 5
0
/*
 * fake_sys_read() - hook wrapper around the original sys_read syscall.
 *
 * Records the call (operation id plus the fd/buf/count parameters)
 * through the begin/end_log_system_call machinery, then delegates to
 * original_sys_read() and returns its result unchanged.
 */
long fake_sys_read(unsigned int fd, char __user * buf, size_t count)
{
	bool log_ok = false;
	long result = 0;

	trace_dog_enter(api_sys_read);

	notify_enter();

	/* NOTE(review): 'path' is fetched but never used below, and is not
	 * freed here — if get_process_path_by_pid() allocates, this leaks;
	 * confirm its ownership contract */
	char * path = get_process_path_by_pid(get_current_process_id());
	if (NULL == path)
	{
		PWARN("get current process path failed, pid: %d\n", get_current_process_id());
	}

	/* NOTE(review): %d is used for the size_t 'count' — %zu would be
	 * the matching specifier; confirm how PVERBOSE handles formats */
	PVERBOSE("sys_read(fd: %d, buf: 0x%08x, count: %d) invoked\n", fd, buf, count);

	/* only emit parameters / completion if the log record was opened */
	log_ok = begin_log_system_call2(op_read_file, api_sys_read, fd, 3);
	if (log_ok)
	{
		add_unsigned_int_param("fd", fd);
		add_pointer_param("buf", buf);
		add_int_param("count", count);
	}

	/* forward to the real syscall implementation */
	result = original_sys_read(fd, buf, count);
	
	if (log_ok)
	{
		end_log_system_call(result);
	}

	trace_dog_leave(api_sys_read);

	return result;
}
Ejemplo n.º 6
0
//read sys_call will invoke this, do read something
/*
 * interface_read() - read handler for the character device.
 *
 * Copies up to 'count' bytes from the cyclic buffer into the user buffer
 * 'buf', serialized by the 'sem' semaphore.  A read that wraps past the
 * end of the cyclic buffer is split into two copy_to_user() calls.
 *
 * Returns the number of bytes copied (0 when the buffer is empty) or
 * -EINTR if interrupted while waiting for the semaphore.
 */
ssize_t interface_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	int used = 0;
	int min = 0;		/* bytes we will actually try to copy */
	int left = 0;		/* bytes copy_to_user() failed to copy */
	int copyed = 0;		/* running total copied; the return value */
	int first_size = 0;	/* contiguous bytes before the buffer wraps */
	
	if (down_interruptible(&sem))
	{
		/*
		 * BUGFIX: the semaphore was never acquired here, so we must
		 * NOT jump to the cleanup path — it calls up(&sem) and would
		 * release a semaphore we do not hold.
		 */
		return -EINTR;
	}
	
	PVERBOSE("read begin, read_ptr: 0x%08x, write_ptr: 0x%08x, used size: %d, want to read %d byte(s)\n", read_ptr, write_ptr, get_used_size(), count);
	
	//clamp the request to the number of bytes available
	used = get_used_size();
	if (count > used)
	{
		min = used;
	}
	else
	{
		min = count;
	}	

	first_size = cycle_buffer_size - (read_ptr - cycle_buffer);
	
	if (read_ptr == write_ptr)
	{
		//read_ptr == write_ptr always means empty: the writer never
		//lets the buffer fill completely (see write_to_interface)
		copyed = 0;
		
		goto cleanup;
	}
	else if (read_ptr > write_ptr && first_size <= min)
	{
		//deal with read cross boundary
		int first_copy = 0;

		ASSERT(first_size > 0);

		PDEBUG("read cross boundary, left %d byte(s), need %d byte(s)\n", first_size, min);

		//read buffer is bigger than this part, so read all left out 
		first_copy = first_size;
			
		//reuse the function-scope 'left' (the inner shadowing
		//declaration was removed)
		left = copy_to_user(buf, read_ptr, first_copy);
		if (0 == left)
		{
			copyed = first_copy;
		}
		else
		{
			//copy not completed, go cleanup
			copyed = first_copy - left;

			PWARN("copy_to_user not completed (%d, %d) inside interface_read\n", left, first_copy);
			
			goto cleanup;
		}
			
		//wrap to the start of the cyclic buffer for the second part
		read_ptr = cycle_buffer;
		buf += copyed;
		min -= copyed;
	}

	if (min > 0) 
	{
		//read second part or read normally
		PVERBOSE("read secondly or normally %d byte(s) at 0x%08x\n", min, read_ptr);
		
		left = copy_to_user(buf, read_ptr, min);
		
		if (0 == left)
		{
			copyed += min;
			read_ptr += min;
		}
		else
		{
			//partial copy: only advance by what actually made it
			copyed += min - left;
			read_ptr += min - left;

			PWARN("copy_to_user not completed (%d, %d) inside interface_read\n", left, min);
		}
	}
	
cleanup:
	up(&sem);
	
	PVERBOSE("read complete, read_ptr: 0x%08x, write_ptr: 0x%08x, used size: %d, copyed: %d\n", read_ptr, write_ptr, get_used_size(), copyed);
	
	return copyed;	
}
Ejemplo n.º 7
0
/*
 * yyparse() - Bison-generated LALR(1) parser driver (machine-generated
 * skeleton; only comments added here, do not hand-edit the logic).
 *
 * Reads tokens via yylex(), consults the yypact/yytable action tables to
 * shift or reduce, runs the grammar's semantic actions (the numbered
 * cases below, from tam_input.y), and performs the standard Bison error
 * recovery by popping states until the error token can be shifted.
 *
 * Returns 0 on accept, 1 on abort (unrecoverable syntax error), and
 * 2 on memory exhaustion.
 */
int
yyparse (void)
{
    int yystate;
    /* Number of tokens to shift before error messages enabled.  */
    int yyerrstatus;

    /* The stacks and their tools:
       'yyss': related to states.
       'yyvs': related to semantic values.

       Refer to the stacks through separate pointers, to allow yyoverflow
       to reallocate them elsewhere.  */

    /* The state stack.  */
    yytype_int16 yyssa[YYINITDEPTH];
    yytype_int16 *yyss;
    yytype_int16 *yyssp;

    /* The semantic value stack.  */
    YYSTYPE yyvsa[YYINITDEPTH];
    YYSTYPE *yyvs;
    YYSTYPE *yyvsp;

    YYSIZE_T yystacksize;

  int yyn;
  int yyresult;
  /* Lookahead token as an internal (translated) token number.  */
  int yytoken = 0;
  /* The variables used to return semantic value and location from the
     action routines.  */
  YYSTYPE yyval;

#if YYERROR_VERBOSE
  /* Buffer for error messages, and its allocated size.  */
  char yymsgbuf[128];
  char *yymsg = yymsgbuf;
  YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif

#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N))

  /* The number of symbols on the RHS of the reduced rule.
     Keep to zero when no symbol should be popped.  */
  int yylen = 0;

  yyssp = yyss = yyssa;
  yyvsp = yyvs = yyvsa;
  yystacksize = YYINITDEPTH;

  YYDPRINTF ((stderr, "Starting parse\n"));

  yystate = 0;
  yyerrstatus = 0;
  yynerrs = 0;
  yychar = YYEMPTY; /* Cause a token to be read.  */
  goto yysetstate;

/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate.  |
`------------------------------------------------------------*/
 yynewstate:
  /* In all cases, when you get here, the value and location stacks
     have just been pushed.  So pushing a state here evens the stacks.  */
  yyssp++;

 yysetstate:
  *yyssp = yystate;

  if (yyss + yystacksize - 1 <= yyssp)
    {
      /* Get the current used size of the three stacks, in elements.  */
      YYSIZE_T yysize = yyssp - yyss + 1;

#ifdef yyoverflow
      {
        /* Give user a chance to reallocate the stack.  Use copies of
           these so that the &'s don't force the real ones into
           memory.  */
        YYSTYPE *yyvs1 = yyvs;
        yytype_int16 *yyss1 = yyss;

        /* Each stack pointer address is followed by the size of the
           data in use in that stack, in bytes.  This used to be a
           conditional around just the two extra args, but that might
           be undefined if yyoverflow is a macro.  */
        yyoverflow (YY_("memory exhausted"),
                    &yyss1, yysize * sizeof (*yyssp),
                    &yyvs1, yysize * sizeof (*yyvsp),
                    &yystacksize);

        yyss = yyss1;
        yyvs = yyvs1;
      }
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
      goto yyexhaustedlab;
# else
      /* Extend the stack our own way.  */
      if (YYMAXDEPTH <= yystacksize)
        goto yyexhaustedlab;
      yystacksize *= 2;
      if (YYMAXDEPTH < yystacksize)
        yystacksize = YYMAXDEPTH;

      {
        yytype_int16 *yyss1 = yyss;
        union yyalloc *yyptr =
          (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
        if (! yyptr)
          goto yyexhaustedlab;
        YYSTACK_RELOCATE (yyss_alloc, yyss);
        YYSTACK_RELOCATE (yyvs_alloc, yyvs);
#  undef YYSTACK_RELOCATE
        if (yyss1 != yyssa)
          YYSTACK_FREE (yyss1);
      }
# endif
#endif /* no yyoverflow */

      yyssp = yyss + yysize - 1;
      yyvsp = yyvs + yysize - 1;

      YYDPRINTF ((stderr, "Stack size increased to %lu\n",
                  (unsigned long int) yystacksize));

      if (yyss + yystacksize - 1 <= yyssp)
        YYABORT;
    }

  YYDPRINTF ((stderr, "Entering state %d\n", yystate));

  if (yystate == YYFINAL)
    YYACCEPT;

  goto yybackup;

/*-----------.
| yybackup.  |
`-----------*/
yybackup:

  /* Do appropriate processing given the current state.  Read a
     lookahead token if we need one and don't already have one.  */

  /* First try to decide what to do without reference to lookahead token.  */
  yyn = yypact[yystate];
  if (yypact_value_is_default (yyn))
    goto yydefault;

  /* Not known => get a lookahead token if don't already have one.  */

  /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol.  */
  if (yychar == YYEMPTY)
    {
      YYDPRINTF ((stderr, "Reading a token: "));
      yychar = yylex ();
    }

  if (yychar <= YYEOF)
    {
      yychar = yytoken = YYEOF;
      YYDPRINTF ((stderr, "Now at end of input.\n"));
    }
  else
    {
      yytoken = YYTRANSLATE (yychar);
      YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
    }

  /* If the proper action on seeing token YYTOKEN is to reduce or to
     detect an error, take that action.  */
  yyn += yytoken;
  if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
    goto yydefault;
  yyn = yytable[yyn];
  if (yyn <= 0)
    {
      if (yytable_value_is_error (yyn))
        goto yyerrlab;
      yyn = -yyn;
      goto yyreduce;
    }

  /* Count tokens shifted since error; after three, turn off error
     status.  */
  if (yyerrstatus)
    yyerrstatus--;

  /* Shift the lookahead token.  */
  YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);

  /* Discard the shifted token.  */
  yychar = YYEMPTY;

  yystate = yyn;
  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
  *++yyvsp = yylval;
  YY_IGNORE_MAYBE_UNINITIALIZED_END

  goto yynewstate;


/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state.  |
`-----------------------------------------------------------*/
yydefault:
  yyn = yydefact[yystate];
  if (yyn == 0)
    goto yyerrlab;
  goto yyreduce;


/*-----------------------------.
| yyreduce -- Do a reduction.  |
`-----------------------------*/
yyreduce:
  /* yyn is the number of a rule to reduce with.  */
  yylen = yyr2[yyn];

  /* If YYLEN is nonzero, implement the default value of the action:
     '$$ = $1'.

     Otherwise, the following line sets YYVAL to garbage.
     This behavior is undocumented and Bison
     users should not rely upon it.  Assigning to YYVAL
     unconditionally makes the parser a bit smaller, and it avoids a
     GCC warning that YYVAL may be used uninitialized.  */
  yyval = yyvsp[1-yylen];


  YY_REDUCE_PRINT (yyn);
  switch (yyn)
    {
        case 5:
#line 43 "tam_input.y" /* yacc.c:1646  */
    {
                TAMObject *code = from_stack_to_code();
                ASSERT(code);
                tam_env_exec(ENV, code);
                show_prompt();
                }
#line 1238 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 6:
#line 49 "tam_input.y" /* yacc.c:1646  */
    { empty_stack(); }
#line 1244 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 7:
#line 52 "tam_input.y" /* yacc.c:1646  */
    { add_to_stack(NUMBER, (yyvsp[0]));          }
#line 1250 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 8:
#line 53 "tam_input.y" /* yacc.c:1646  */
    { PVERBOSE ("STRING\n"); /*not used*/}
#line 1256 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 9:
#line 54 "tam_input.y" /* yacc.c:1646  */
    { add_to_stack(SYMBOL, (yyvsp[0]));          }
#line 1262 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 13:
#line 60 "tam_input.y" /* yacc.c:1646  */
    { add_to_stack('\'', NULL);          }
#line 1268 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 14:
#line 63 "tam_input.y" /* yacc.c:1646  */
    { add_to_stack('(', NULL);           }
#line 1274 "tam_input.tab.c" /* yacc.c:1646  */
    break;

  case 15:
#line 66 "tam_input.y" /* yacc.c:1646  */
    { add_to_stack(')', NULL);           }
#line 1280 "tam_input.tab.c" /* yacc.c:1646  */
    break;


#line 1284 "tam_input.tab.c" /* yacc.c:1646  */
      default: break;
    }
  /* User semantic actions sometimes alter yychar, and that requires
     that yytoken be updated with the new translation.  We take the
     approach of translating immediately before every use of yytoken.
     One alternative is translating here after every semantic action,
     but that translation would be missed if the semantic action invokes
     YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
     if it invokes YYBACKUP.  In the case of YYABORT or YYACCEPT, an
     incorrect destructor might then be invoked immediately.  In the
     case of YYERROR or YYBACKUP, subsequent parser actions might lead
     to an incorrect destructor call or verbose syntax error message
     before the lookahead is translated.  */
  YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);

  YYPOPSTACK (yylen);
  yylen = 0;
  YY_STACK_PRINT (yyss, yyssp);

  *++yyvsp = yyval;

  /* Now 'shift' the result of the reduction.  Determine what state
     that goes to, based on the state we popped back to and the rule
     number reduced by.  */

  yyn = yyr1[yyn];

  yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
  if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
    yystate = yytable[yystate];
  else
    yystate = yydefgoto[yyn - YYNTOKENS];

  goto yynewstate;


/*--------------------------------------.
| yyerrlab -- here on detecting error.  |
`--------------------------------------*/
yyerrlab:
  /* Make sure we have latest lookahead translation.  See comments at
     user semantic actions for why this is necessary.  */
  yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);

  /* If not already recovering from an error, report this error.  */
  if (!yyerrstatus)
    {
      ++yynerrs;
#if ! YYERROR_VERBOSE
      yyerror (YY_("syntax error"));
#else
# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
                                        yyssp, yytoken)
      {
        char const *yymsgp = YY_("syntax error");
        int yysyntax_error_status;
        yysyntax_error_status = YYSYNTAX_ERROR;
        if (yysyntax_error_status == 0)
          yymsgp = yymsg;
        else if (yysyntax_error_status == 1)
          {
            if (yymsg != yymsgbuf)
              YYSTACK_FREE (yymsg);
            yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
            if (!yymsg)
              {
                yymsg = yymsgbuf;
                yymsg_alloc = sizeof yymsgbuf;
                yysyntax_error_status = 2;
              }
            else
              {
                yysyntax_error_status = YYSYNTAX_ERROR;
                yymsgp = yymsg;
              }
          }
        yyerror (yymsgp);
        if (yysyntax_error_status == 2)
          goto yyexhaustedlab;
      }
# undef YYSYNTAX_ERROR
#endif
    }



  if (yyerrstatus == 3)
    {
      /* If just tried and failed to reuse lookahead token after an
         error, discard it.  */

      if (yychar <= YYEOF)
        {
          /* Return failure if at end of input.  */
          if (yychar == YYEOF)
            YYABORT;
        }
      else
        {
          yydestruct ("Error: discarding",
                      yytoken, &yylval);
          yychar = YYEMPTY;
        }
    }

  /* Else will try to reuse lookahead token after shifting the error
     token.  */
  goto yyerrlab1;


/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR.  |
`---------------------------------------------------*/
yyerrorlab:

  /* Pacify compilers like GCC when the user code never invokes
     YYERROR and the label yyerrorlab therefore never appears in user
     code.  */
  if (/*CONSTCOND*/ 0)
     goto yyerrorlab;

  /* Do not reclaim the symbols of the rule whose action triggered
     this YYERROR.  */
  YYPOPSTACK (yylen);
  yylen = 0;
  YY_STACK_PRINT (yyss, yyssp);
  yystate = *yyssp;
  goto yyerrlab1;


/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR.  |
`-------------------------------------------------------------*/
yyerrlab1:
  yyerrstatus = 3;      /* Each real token shifted decrements this.  */

  for (;;)
    {
      yyn = yypact[yystate];
      if (!yypact_value_is_default (yyn))
        {
          yyn += YYTERROR;
          if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
            {
              yyn = yytable[yyn];
              if (0 < yyn)
                break;
            }
        }

      /* Pop the current state because it cannot handle the error token.  */
      if (yyssp == yyss)
        YYABORT;


      yydestruct ("Error: popping",
                  yystos[yystate], yyvsp);
      YYPOPSTACK (1);
      yystate = *yyssp;
      YY_STACK_PRINT (yyss, yyssp);
    }

  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
  *++yyvsp = yylval;
  YY_IGNORE_MAYBE_UNINITIALIZED_END


  /* Shift the error token.  */
  YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);

  yystate = yyn;
  goto yynewstate;


/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here.  |
`-------------------------------------*/
yyacceptlab:
  yyresult = 0;
  goto yyreturn;

/*-----------------------------------.
| yyabortlab -- YYABORT comes here.  |
`-----------------------------------*/
yyabortlab:
  yyresult = 1;
  goto yyreturn;

#if !defined yyoverflow || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here.  |
`-------------------------------------------------*/
yyexhaustedlab:
  yyerror (YY_("memory exhausted"));
  yyresult = 2;
  /* Fall through.  */
#endif

yyreturn:
  if (yychar != YYEMPTY)
    {
      /* Make sure we have latest lookahead translation.  See comments at
         user semantic actions for why this is necessary.  */
      yytoken = YYTRANSLATE (yychar);
      yydestruct ("Cleanup: discarding lookahead",
                  yytoken, &yylval);
    }
  /* Do not reclaim the symbols of the rule whose action triggered
     this YYABORT or YYACCEPT.  */
  YYPOPSTACK (yylen);
  YY_STACK_PRINT (yyss, yyssp);
  while (yyssp != yyss)
    {
      yydestruct ("Cleanup: popping",
                  yystos[*yyssp], yyvsp);
      YYPOPSTACK (1);
    }
#ifndef yyoverflow
  if (yyss != yyssa)
    YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
  if (yymsg != yymsgbuf)
    YYSTACK_FREE (yymsg);
#endif
  return yyresult;
}
Ejemplo n.º 8
0
/*
 * mc_hash_thread() - kthread that incrementally migrates items from the
 * old hash table into the expanded one.
 *
 * While EXPANDING is set it moves up to settings.hash_bulk_move buckets
 * per pass under the global item lock plus cache_lock, then sleeps on
 * hash_wait_queue until the next expansion is requested (SEXPANDING) or
 * the thread should stop.  Exits when ZOMBIE is set.
 */
static int mc_hash_thread(void *ignore)
{
	set_freezable();
	/* keep the slab rebalancer quiet while we use the global item lock */
	mc_slabs_rebalancer_pause();

	while (!test_bit(ZOMBIE, &hashflags)) {
		int ii = 0;

		/* 
		 * Lock the cache, and bulk move multiple buckets to
		 * the new hash table.
		 */
		mc_item_lock_global();
		mutex_lock(&cache_lock);

		for (ii = 0; ii < settings.hash_bulk_move && test_bit(EXPANDING, &hashflags); ii++) {
			item *it, *next;
			int bucket;

			/* rehash every item in the current old bucket into
			 * the new (larger) table */
			for (it = old_hashtable[expand_bucket]; it; it = next) {
				next = it->h_next;

				bucket = hash(ITEM_key(it), it->nkey, 0) &
					 hashmask(hashpower);
				it->h_next = primary_hashtable[bucket];
				primary_hashtable[bucket] = it;
			}

			old_hashtable[expand_bucket] = NULL;
			expand_bucket++;

			/* every bucket of the old (hashpower - 1) table has
			 * been migrated: expansion is complete */
			if (expand_bucket == hashsize(hashpower - 1)) {
				clear_bit(EXPANDING, &hashflags);
				clear_bit(SEXPANDING, &hashflags);
				free_buffer(&old_hts);

				ATOMIC64_SUB(stats.hash_bytes,
					     hashsize(hashpower - 1) *
					     sizeof(void *));
				clear_bit(STATS_HASH_EXP, &stats.flags);

				PVERBOSE(1, "hash table expansion done\n");
			}
		}

		mutex_unlock(&cache_lock);
		mc_item_unlock_global();

		if (!test_bit(EXPANDING, &hashflags)) {
			/* 
			 * finished expanding. tell all threads to use
			 * fine-grained locks.
			 */
			mc_switch_item_lock_type(ITEM_LOCK_GRANULAR);
			mc_slabs_rebalancer_resume();

			/*
			 * We are done expanding.. just wait for next invocation
			 */
			wait_event_freezable(hash_wait_queue,
					     test_bit(SEXPANDING, &hashflags) ||
					     kthread_should_stop());
			if (test_bit(ZOMBIE, &hashflags)) {
				goto out;
			}
			/* before doing anything, tell threads to use a global lock */
			mc_slabs_rebalancer_pause();
			mc_switch_item_lock_type(ITEM_LOCK_GLOBAL);
			mutex_lock(&cache_lock);
			mc_hash_expand();
			mutex_unlock(&cache_lock);
		}
	}

out:
	return 0;
}
Ejemplo n.º 9
0
/** 
 * mc_slab_rebalance_move() - walk the slab page being reassigned and try
 * to evacuate up to settings.slab_bulk_check items per call.
 *
 * refcount == 0 is safe since nobody can incr while cache_lock is held.
 * refcount != 0 is impossible since flags/etc can be modified in other thread.
 * instead, note we found a busy one and bail. logic in mc_do_item_get will
 * prevent busy items from continuing to be busy.
 *
 * Returns the number of items found busy (locked or still referenced)
 * during this pass.
 */
static int mc_slab_rebalance_move(void)
{
	int i;
	int was_busy = 0;
	int refcount = 0;
	slabclass_t *s_cls;
	move_status status = MOVE_PASS;

	mutex_lock(&cache_lock);
	mutex_lock(&slabs_lock);

	s_cls = &slabclass[slab_rebal.s_clsid];

	for (i = 0; i < settings.slab_bulk_check; i++) {
		item *it = slab_rebal.slab_pos;
		status = MOVE_PASS;

		/* slabs_clsid == 255 marks an item this rebalance already
		 * evacuated (set in the MOVE_DONE case below) — skip it */
		if (it->slabs_clsid != 255) {
			void *hold_lock = NULL;
			u32 hv = hash(ITEM_key(it), it->nkey, 0);

			if ((hold_lock = mc_item_trylock(hv)) == NULL) {
				status = MOVE_LOCKED;
			} else {
				refcount = atomic_inc_return(&it->refcount);

				if (refcount == 1) { /* item is unlinked, unused */
					if (it->it_flags & ITEM_SLABBED) {
						/* remove from slab freelist */
						if (s_cls->slots == it) {
							s_cls->slots = it->next;
						}
						if (it->next)
							it->next->prev = it->prev;
						if (it->prev)
							it->prev->next = it->next;

						s_cls->sl_curr--;
						status = MOVE_DONE;
					} else {
						status = MOVE_BUSY;
					}
				} else if (refcount == 2) { /* item is linked but not busy */
					if ((it->it_flags & ITEM_LINKED) != 0) {
						mc_do_item_unlink_nolock(it,
									 hash(ITEM_key(it),
									 it->nkey, 0));
						status = MOVE_DONE;
					} else {
						/*
						 * refcount == 1 + !ITEM_LINKED means the item
						 * is being uploaded to, or was just unlinked 
						 * but hasn't been freed yet. Let it bleed
						 * off on its own and try again later.
						 */
						status = MOVE_BUSY;
					}
				} else {
					PVERBOSE(2, "Slab reassign hit a busy item: "
						 "refcount: %d (%d -> %d)\n",
						 atomic_read(&it->refcount),
						 slab_rebal.s_clsid,
						 slab_rebal.d_clsid);
					status = MOVE_BUSY;
				}

				mc_item_trylock_unlock(hold_lock);
			}
		}

		switch (status) {
		case MOVE_DONE:
			/* mark the slot evacuated so later passes skip it */
			atomic_set(&it->refcount, 0);
			it->it_flags = 0;
			it->slabs_clsid = 255;
			break;
		case MOVE_BUSY:
			/* drop the reference we took above */
			atomic_dec(&it->refcount);
			/* fallthrough: busy and locked are both counted */
		case MOVE_LOCKED:
			slab_rebal.busy_items++;
			was_busy++;
			break;
		case MOVE_PASS:
			break;
		}

		/* advance to the next item slot in the victim page */
		slab_rebal.slab_pos = (char *)slab_rebal.slab_pos + s_cls->size;
		if (slab_rebal.slab_pos >= slab_rebal.slab_end) {
			break;
		}
	}

	if (slab_rebal.slab_pos >= slab_rebal.slab_end) {
		/* some items were busy, start again from the top */
		if (slab_rebal.busy_items) {
			slab_rebal.slab_pos = slab_rebal.slab_start;
			slab_rebal.busy_items = 0;
		} else {
			slab_rebal.done++;
		}
	}

	mutex_unlock(&slabs_lock);
	mutex_unlock(&cache_lock);

	return was_busy;
}
Ejemplo n.º 10
0
/*
 * mc_slab_rebalance_start() - validate the requested source/destination
 * classes and set up slab_rebal state for a page move.
 *
 * Picks the first slab of the source class as the victim page (killing
 * is set to 1 and used as a 1-based index), computes the page's start,
 * end and current position, and sets slab_rebal.signal = 2 so item
 * lookup knows a move is in flight.  Returns 0 on success or -EFAULT
 * when the class ids are invalid, the destination slab list cannot
 * grow, or the source class has fewer than two slabs.
 */
static int mc_slab_rebalance_start(void)
{
	slabclass_t *s_cls;
	int ret = 0;

	mutex_lock(&cache_lock);
	mutex_lock(&slabs_lock);

	/* both class ids must be valid and distinct */
	if (slab_rebal.s_clsid < POWER_SMALLEST ||
	    slab_rebal.s_clsid > power_largest  ||
	    slab_rebal.d_clsid < POWER_SMALLEST ||
	    slab_rebal.d_clsid > power_largest  ||
	    slab_rebal.s_clsid == slab_rebal.d_clsid) {
		ret = -EFAULT;
		goto out;
	}

	s_cls = &slabclass[slab_rebal.s_clsid];

	/* make sure the destination list has room for the incoming slab */
	if (mc_grow_slab_list(slab_rebal.d_clsid)) {
		ret = -EFAULT;
		goto out;
	}

	/* leave the source class at least one slab */
	if (s_cls->slabs < 2) {
		ret = -EFAULT;
		goto out;
	}

	s_cls->killing = 1;

	/* slab list storage differs by allocation mode — see the matching
	 * branch in mc_slab_rebalance_finish() */
	if (likely(!mem_base)) {
		slab_rebal.slab_start =
			BUFFER(&SLABLIST_AS_B(&s_cls->slab_list)[s_cls->killing - 1]);
	} else {
		slab_rebal.slab_start =
			SLABLIST_AS_V(&s_cls->slab_list)[s_cls->killing - 1];
	}
	slab_rebal.slab_end	=
		(char *)slab_rebal.slab_start
		+ (s_cls->size * s_cls->perslab);
	slab_rebal.slab_pos	= 
		slab_rebal.slab_start;
	slab_rebal.done		= 0;

	/* Also tells mc_do_item_get to search for items in this slab */
	slab_rebal.signal = 2;

	PVERBOSE(1, "Started a slab rebalance\n");

	mutex_unlock(&slabs_lock);
	mutex_unlock(&cache_lock);

	set_bit(STATS_SLAB_RES, &stats.flags);

	return 0;

out:
	mutex_unlock(&slabs_lock);
	mutex_unlock(&cache_lock);
	return ret;
}