Example #1
int kthread_init(const struct stack_struct *ss) {

  const int idle_task=0;
  const int boot_task=1;

  if((run_queue = kmalloc(sizeof *run_queue, GFP_KERNEL | GFP_ZERO))) {

    DEBUG_TRACE("%d = stack_check %x %x", stack_check(ss), ss->stack_base, ss->stack_size);
    spinlock_init(&run_queue->spinlock);
    run_queue->running = boot_task; 
		
    // create an empty kthread for the boot-task! UGLY!
    run_queue->kthreads[boot_task] = _kmalloc_kthread();
		
    if(run_queue->kthreads[boot_task]) {

      DEBUG_TRACE("%d = stack_check", stack_check(ss));

      // store boot_stack info.
      run_queue->kthreads[boot_task]->stack = *ss;
      
      irq_itf irq;
      if(timer_open(&run_queue->timer, &irq, 0)==0) {

        DEBUG_TRACE("");

        interrupt_controller_itf intc;
        if(interrupt_controller(&intc) == 0) {

          DEBUG_TRACE("");

          INVOKE(intc, register_handler, irq);
          INVOKE(intc, unmask, irq);
	  
          goto success;
        }
      }
    }
  }

  goto err;

success:
  // start idle-task.
  if(_kthread_create(&run_queue->kthreads[idle_task], GFP_KERNEL, &_asm_idle_task, 0)==0)
  {
    DEBUG_TRACE("");
    _BUG_ON(!run_queue->kthreads[idle_task]);

    // UGLY - yield to self! The current task is the first and only runnable thread right now.
    // We NEED to do this to populate the empty kthread we allocated for ourselves earlier.
    kthread_yield();

    return _sched_next_task(NULL);
  }
err:
  _BUG();
  return -1;
}
Example #2
/***********    Invoke Foreground Command  *********************/
static void command_invoke(void const *args) 
{
    void (*func)(void const * ) ;
    int i,iteration ;
    
    func = (void(*)(void const *))((func_args *)args)->argv[0] ; 
    #if defined(HAVE_KEIL_RTX)
    wc_LockMutex((wolfSSL_Mutex *)&command_mutex) ;
    #endif
    iteration = for_iteration ;
    for(i=0; i< iteration; i++) {
        if(iteration > 1) printf("--- Start for %d ---->\n", i) ;
        #if defined(HAVE_KEIL_RTX) && !defined(WOLFSSL_CMSIS_RTOS)
        stack_fill(command_stack, COMMAND_STACK_SIZE) ;
        #endif
                
        func(args) ;        /* invoke command */
                
        #if defined(HAVE_KEIL_RTX) && !defined(WOLFSSL_CMSIS_RTOS)
        stack_check(command_stack, COMMAND_STACK_SIZE) ;
        #endif
    }

    if(iteration > 1) 
        for_iteration = 1 ;
    osDelay(20000) ;
    #ifdef HAVE_KEIL_RTX
        wc_UnLockMutex((wolfSSL_Mutex *)&command_mutex) ;
        #ifdef WOLFSSL_CMSIS_RTOS
            osThreadTerminate(osThreadGetId()) ;
        #else
            os_tsk_delete_self() ;
        #endif
    #endif
}
Example #3
stack_t* stack_push(stack_t* head, stack_t* newHead)
{
#if NON_BLOCKING == 0
    pthread_mutex_lock(&mutex);
    newHead->ptr=head;
    pthread_mutex_unlock(&mutex);

#elif NON_BLOCKING == 1
  // Implement a hardware CAS-based stack

    if(head == NULL)
    {
        newHead->ptr = head;
    }
    else
    {
        stack_t* old;
        do
        {
            old = head;
            newHead->ptr = old;
        }while(cas(newHead->ptr, old, head) != old); /* retry until the CAS succeeds */
    }
#else

  // Implement a software CAS-based stack
#endif

  // Debug practice: you can check if this operation leaves the stack in a consistent state
  // It doesn't harm performance, as sanity checks are disabled at measurement time
  // This is to be updated as your implementation progresses
  stack_check((stack_t*)1);

  return newHead;
}
Example #4
void _arch_irq_task_switch(void * _cpu_state) {
  
  if(run_queue) {

    spinlock_lock(&run_queue->spinlock);

    get_system_time(&run_queue->sched_time);

    struct kthread * c = run_queue_current();
    struct kthread * n = run_queue_next();
		
    spinlock_unlock(&run_queue->spinlock);

    _BUG_ON(!n);
    _BUG_ON(!c);

    if(stack_check(&(c->stack))<0)
      _BUG(); // TASK WE JUST PUT TO SLEEP BLEW ITS STACK!
    
    _switch(c,n,_cpu_state);
    
    // schedule next switch.
    _sched_next_task(NULL);
  }
}
Example #5
/***********    Invoke Foreground Command  *********************/
static void command_invoke(void *args) 
{
    void (*func)(void * ) ;
    int i,iteration ;

    func = (void(*)(void *))((func_args *)args)->argv[0] ; 
    #ifdef  HAVE_KEIL_RTX
    LockMutex((CyaSSL_Mutex *)&command_mutex) ;
    #endif
    iteration = for_iteration ;
    for(i=0; i< iteration; i++) {
        if(iteration > 1) printf("--- Start for %d ---->\n", i) ;
        #if defined(HAVE_KEIL_RTX)
        stack_fill(command_stack, COMMAND_STACK_SIZE) ;
        #endif
                
        func(args) ;        /* invoke command */
                
        #if defined(HAVE_KEIL_RTX)
        stack_check(command_stack, COMMAND_STACK_SIZE) ;
        #endif
    }
    if(iteration > 1) 
        for_iteration = 1 ;
    #ifdef HAVE_KEIL_RTX
    UnLockMutex((CyaSSL_Mutex *)&command_mutex) ;
    os_tsk_delete_self() ;
    #endif
}
Example #6
int
test_push_safe()
{
  // Make sure your stack remains in a good state with expected content when
  // several threads push concurrently to it
  int i;
  stack_element_t* element;  

  // Do some work
  for (i = 10; i < 20; i++) {
    element = malloc(sizeof(stack_element_t));
    element->value = i;
    stack_push(stack, element);
  }

  // check if the stack is in a consistent state
  stack_check(stack);

  // check other properties expected after a push operation
  // (this is to be updated as your stack design progresses)
  assert(stack->head->value == 19);

  // For now, this test always fails
  return 1;
}
Example #7
int main(){
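    /* Run each module's check routine. */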
    ll_check();
    stack_check();
    queue_check();

    read_check();
    path_check1();
    path_check2();

    return 0;
}
Example #8
int kthread_stack_check() {
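  /* Check the stack of the currently running task: returns the stack_check()
     result for it, or -1 if no task is current. */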

  int i=-1;
  spinlock_lock(&run_queue->spinlock);
  struct kthread * c = run_queue_current();
  if(c)
    i = stack_check(&(c->stack));
  spinlock_unlock(&run_queue->spinlock);
  
  return i;
}
Example #9
/*
 * Add a new group onto the stack, pushing down one frame.  nobjs is the number
 * of items in the group.  We have to read this many objects before popping
 * back up to an enclosing group - see next_object() and previous_object()
 * below.
 */
static int stack_new_group(ea_file_impl_t *f, int nobjs)
{
	if (stack_check(f) != 0) {
		stack_free(f);
		/* exacct_errno set above. */
		return (-1);
	}
	f->ef_ndeep++;
	f->ef_depth[f->ef_ndeep].efd_obj = 0;
	f->ef_depth[f->ef_ndeep].efd_nobjs = nobjs;
	return (0);
}
Example #10
/*******  Invoke Background Job   *******************************/
static void bg_job_invoke(void *args) 
{
    void (*func)(void * ) ;
    BackGround = 1 ; 
    stack_fill(bg_job_stack, BG_JOB_STACK_SIZE) ;
    func = (void(*)(void *))((func_args *)args)->argv[0] ; 
    func(args) ;        /* invoke command */
    stack_check(bg_job_stack, BG_JOB_STACK_SIZE) ;
    #ifdef CYASSL_KEIL_NET
    init_TcpNet ();
    #endif
    BackGround = 0 ;
    os_tsk_delete_self() ;
}
Example #11
static void
raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv, VALUE obj,
		     int last_call_status)
{
    ID id;
    VALUE exc = rb_eNoMethodError;
    const char *format = 0;

    if (argc == 0 || !SYMBOL_P(argv[0])) {
	rb_raise(rb_eArgError, "no id given");
    }

    stack_check();

    id = SYM2ID(argv[0]);

    if (last_call_status & NOEX_PRIVATE) {
	format = "private method `%s' called for %s";
    }
    else if (last_call_status & NOEX_PROTECTED) {
	format = "protected method `%s' called for %s";
    }
    else if (last_call_status & NOEX_VCALL) {
	format = "undefined local variable or method `%s' for %s";
	exc = rb_eNameError;
    }
    else if (last_call_status & NOEX_SUPER) {
	format = "super: no superclass method `%s' for %s";
    }
    if (!format) {
	format = "undefined method `%s' for %s";
    }

    {
	int n = 0;
	VALUE args[3];
	args[n++] = rb_funcall(rb_const_get(exc, rb_intern("message")), '!',
			       3, rb_str_new2(format), obj, argv[0]);
	args[n++] = argv[0];
	if (exc == rb_eNoMethodError) {
	    args[n++] = rb_ary_new4(argc - 1, argv + 1);
	}
	exc = rb_class_new_instance(n, args, exc);

	if (!(last_call_status & NOEX_MISSING)) {
	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	}
	rb_exc_raise(exc);
    }
}
Example #12
stack_t* stack_push_aba(stack_t* head, stack_t* newHead)
{
#if NON_BLOCKING == 0
    pthread_mutex_lock(&mutex);
    newHead->ptr=head;
    pthread_mutex_unlock(&mutex);

#elif NON_BLOCKING == 1
  // Implement a hardware CAS-based stack

    if(head == NULL)
    {
        newHead->ptr = head;
		printf("adds first element");
    }
    else
    {
        stack_t* old;

		printf("%i size of stack.", sizeof_stack(head));
        do
        {
            old = head;
            newHead->ptr = old;
        }while(cas(newHead->ptr, old, head) != old);
    }
    /*stack* S = newHead;*/
    
    /*stack* A = stack_pop_aba(S);*/
    /*stack_push_aba(S, A)*/
    /*pthread_mutex_unlock(&mutex);*/
#else

  // Implement a software CAS-based stack
#endif

  // Debug practice: you can check if this operation leaves the stack in a consistent state
  // It doesn't harm performance, as sanity checks are disabled at measurement time
  // This is to be updated as your implementation progresses
  stack_check((stack_t*)1);

  return newHead;
}
Example #13
int test_pop_safe() {
	  // Do some work
	  node_t* data = malloc(sizeof(node_t));
	  data->data = 0;

	  stack_push(stack, data);

	  // check if the stack is in a consistent state
	  stack_check(stack);

	  // check other properties expected after a push operation
	  // (this is to be updated as your stack design progresses)
	  // assert(stack->change_this_member == 0);

	  // For now, this test always fails
	  data = stack_pop(stack);
	  int number = data->data;
	  free(data);
	  return number == 0;
}
Example #14
extern void
USO_schedule (USO_thread_t * new_thread)
{
    if (new_thread->signals & (1 << USO_SIGNAL_STOP))
    {
        USO_thread_terminate (new_thread);
        new_thread = USO_next2run ();
    }
    /* Idle thread does not call schedule, so the old thread is terminated 
       at the next schedule call and not after the context switch! */
    if (old_thread->state == USO_EXIT)
    {
        USO_thread_terminate (old_thread);
    }
    old_thread = current_thread;
    stack_check ();
    old_thread->ticks += DEV_get_ticks_diff (schedule_time);
    schedule_time = DEV_get_ticks ();
    preemption = PREEMPTION;
    new_thread->state = USO_RUNNING;
    current_thread = new_thread;
    USO_context_switch (&old_thread->cpu, &new_thread->cpu);
}
Example #15
int test_push_safe() {
  // Make sure your stack remains in a good state with expected content when
  // several threads push concurrently to it

  // Do some work
  node_t* data = malloc(sizeof(node_t));
  data->data = 0;

  stack_push(stack, data);

  // check if the stack is in a consistent state
  stack_check(stack);

  // check other properties expected after a push operation
  // (this is to be updated as your stack design progresses)
  // assert(stack->change_this_member == 0);

  // For now, this test always fails
  data = stack_pop(stack);
  int number = data->data;
  free(data);
  return number == 0;
}
Example #16
/*******  Invoke Background Job   *******************************/
static void bg_job_invoke(void const *args) 
{
    void (*func)(void const * ) ;
    BackGround = 1 ; 
    #if defined(HAVE_KEIL_RTX) && !defined(WOLFSSL_CMSIS_RTOS)
    stack_fill(bg_job_stack, BG_JOB_STACK_SIZE) ;
    #endif

    func = (void(*)(void const *))((func_args *)args)->argv[0] ; 
    func(args) ;        /* invoke command */
    #if defined(HAVE_KEIL_RTX) && !defined(WOLFSSL_CMSIS_RTOS)
    stack_check(bg_job_stack, BG_JOB_STACK_SIZE) ;
    #endif
    
    osDelay(20000) ;
    BackGround = 0 ;

    #ifdef WOLFSSL_CMSIS_RTOS
        osThreadTerminate(osThreadGetId()) ;
    #else   
        os_tsk_delete_self() ;
    #endif
}
Example #17
File: tvm.c  Project: fy0/tinyre
_INLINE static
int do_ins_cmp_group(VMState* vms) {
    RunCache *rc;
    VMSnap* snap = vms->snap;
    int index = *(snap->codes + 1);
    MatchGroup* g = vms->groups + index;

#ifdef TRE_DEBUG
    printf("%12s %d\n", "CMP_GROUP", index);
#endif

    // works for special groups (?=) (?!) (?<=) (?<!)
    if (g->type == GT_IF_MATCH) {
        vms->input_cache[index - vms->group_num] = snap->str_pos;
    } else if (g->type == GT_IF_NOT_MATCH || g->type == GT_IF_NOT_PRECEDED_BY) {
        if (snap->mr.enable == 1) {
            save_snap(vms);
        } else {
            snap->codes += 2;
            save_snap(vms);
            snap->codes -= 2;
        }

        if (g->type == GT_IF_NOT_PRECEDED_BY) {
            // current matched length less than group's length
            if (snap->str_pos - vms->input_str < g->extra) {
                return 0;
            }
            snap->str_pos = snap->str_pos - g->extra;
            snap->chrcode = *snap->str_pos;
        }
    } else if (g->type == GT_IF_PRECEDED_BY) {
        // current matched length less than group's length
        if (snap->str_pos - vms->input_str < g->extra) {
            return 0;
        }
        vms->input_cache[index - vms->group_num] = snap->str_pos;
        snap->str_pos = snap->str_pos - g->extra;
        snap->chrcode = *snap->str_pos;
    }

    // save cache
    stack_check(snap->run_cache, RunCache, 5 + snap->run_cache.len);
    rc = stack_push(snap->run_cache, RunCache);
    rc->codes_cache = snap->codes;
    rc->mr = snap->mr;
    rc->cur_group = snap->cur_group;

    // load group code
    snap->codes = g->codes;
    snap->mr.enable = 0;
    snap->cur_group = index;

    // code for conditional backref
    if (g->type == GT_BACKREF_CONDITIONAL_INDEX) {
        if (vms->match_results[g->extra].head && vms->match_results[g->extra].tail) {
            snap->codes += 2;
        }
    }
    // end

    // set match result, value of head
    if (index < vms->group_num_all) {
        vms->match_results[index].tmp = snap->str_pos;
    }

    return 1;
}
Example #18
/*
 * ea_unpack_object() can be considered as a finite series of get operations on
 * a given buffer, that rebuilds the hierarchy of objects compacted by a pack
 * operation.  Because there is complex state associated with the group depth,
 * ea_unpack_object() must complete as one operation on a given buffer.
 */
ea_object_type_t
ea_unpack_object(ea_object_t **objp, int flag, void *buf, size_t bufsize)
{
	ea_file_impl_t fake;
	ea_object_t *obj;
	ea_object_type_t first_obj_type;

	*objp = NULL;
	if (buf == NULL) {
		EXACCT_SET_ERR(EXR_INVALID_BUF);
		return (EO_ERROR);
	}

	/* Set up the structures needed for unpacking */
	bzero(&fake, sizeof (ea_file_impl_t));
	if (stack_check(&fake) == -1) {
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	fake.ef_buf = buf;
	fake.ef_bufsize = bufsize;

	/* Unpack the first object in the buffer - this should succeed. */
	if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
		stack_free(&fake);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	obj->eo_next = NULL;
	if ((first_obj_type = xget_object(&fake, obj, bufread_wrapper,
	    bufseek_wrapper, bufpos_wrapper, flag)) == -1) {
		stack_free(&fake);
		ea_free(obj, sizeof (ea_object_t));
		/* exacct_errno set above. */
		return (EO_ERROR);
	}

	if (obj->eo_type == EO_GROUP && unpack_group(&fake, obj, flag) == -1) {
		stack_free(&fake);
		ea_free_object(obj, flag);
		/* exacct_errno set above. */
		return (EO_ERROR);
	}
	*objp = obj;

	/*
	 * There may be other objects in the buffer - if so, chain them onto
	 * the end of the list.  We have reached the end of the list when
	 * xget_object() returns -1 with exacct_error set to EXR_EOF.
	 */
	for (;;) {
		if ((obj = ea_alloc(sizeof (ea_object_t))) == NULL) {
			stack_free(&fake);
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
		obj->eo_next = NULL;
		if (xget_object(&fake, obj, bufread_wrapper, bufseek_wrapper,
			    bufpos_wrapper, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			if (ea_error() == EXR_EOF) {
				EXACCT_SET_ERR(EXR_OK);
				return (first_obj_type);
			} else {
				ea_free_object(*objp, flag);
				*objp = NULL;
				/* exacct_error set above. */
				return (EO_ERROR);
			}
		}

		(void) ea_attach_to_object(*objp, obj);

		if (obj->eo_type == EO_GROUP &&
		    unpack_group(&fake, obj, flag) == -1) {
			stack_free(&fake);
			ea_free(obj, sizeof (ea_object_t));
			ea_free_object(*objp, flag);
			*objp = NULL;
			/* exacct_errno set above. */
			return (EO_ERROR);
		}
	}
}
Example #19
int
ea_fdopen(ea_file_t *ef, int fd, const char *creator, int aflags, int oflags)
{
	ea_file_impl_t *f = (ea_file_impl_t *)ef;

	bzero(f, sizeof (*f));
	f->ef_oflags = oflags;
	f->ef_fd = fd;

	/* Initialize depth stack. */
	if (stack_check(f) == -1) {
		/* exacct_error set above. */
		goto error1;
	}

	/*
	 * 1.  If we are O_CREAT, then we will need to write a header
	 * after opening name.
	 */
	if (oflags & O_CREAT) {
		if (creator == NULL) {
			EXACCT_SET_ERR(EXR_NO_CREATOR);
			goto error2;
		}
		if ((f->ef_creator = ea_strdup(creator)) == NULL) {
			/* exacct_error set above. */
			goto error2;
		}
		if ((f->ef_fp = fdopen(f->ef_fd, "w")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error3;
		}
		if (write_header(ef) == -1) {
			/* exacct_error set above. */
			goto error3;
		}

	/*
	 * 2.  If we are not O_CREAT, but are RDWR or WRONLY, we need to
	 * seek to EOF so that appends will succeed.
	 */
	} else if (oflags & O_RDWR || oflags & O_WRONLY) {
		if ((f->ef_fp = fdopen(f->ef_fd, "r+")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) < 0) {
				/* exacct_error set above. */
				goto error2;
			}
		}

		if (fseeko(f->ef_fp, 0, SEEK_END) == -1) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

	/*
	 * 3. This is an undefined manner for opening an exacct file.
	 */
	} else if (oflags != O_RDONLY) {
		EXACCT_SET_ERR(EXR_NOTSUPP);
		goto error2;

	/*
	 * 4a.  If we are RDONLY, then we are in a position such that
	 * either a ea_get_object or an ea_next_object will succeed.  If
	 * aflags was set to EO_TAIL, seek to the end of the file.
	 */
	} else {
		if ((f->ef_fp = fdopen(f->ef_fd, "r")) == NULL) {
			EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
			goto error2;
		}

		if ((aflags & EO_VALIDATE_MSK) == EO_VALID_HDR) {
			if (validate_header(ef, creator) == -1) {
				/* exacct_error set above. */
				goto error2;
			}
		}

		/*
		 * 4b.  Handle the "open at end" option, for consumers who want
		 * to go backwards through the file (i.e. lastcomm).
		 */
		if ((aflags & EO_POSN_MSK) == EO_TAIL) {
			if (fseeko(f->ef_fp, 0, SEEK_END) < 0) {
				EXACCT_SET_ERR(EXR_SYSCALL_FAIL);
				goto error2;
			}
		}
	}

	EXACCT_SET_ERR(EXR_OK);
	return (0);

	/* Error cleanup code */
error3:
	ea_strfree(f->ef_creator);
error2:
	stack_free(f);
error1:
	bzero(f, sizeof (*f));
	return (-1);
}
Example #20
static inline VALUE
rb_call0(VALUE klass, VALUE recv, ID mid, int argc, const VALUE *argv,
	 int scope, VALUE self)
{
    NODE *body, *method;
    int noex;
    ID id = mid;
    struct cache_entry *ent;
    rb_thread_t *th = GET_THREAD();

    if (!klass) {
	rb_raise(rb_eNotImpError,
		 "method `%s' called on terminated object (%p)",
		 rb_id2name(mid), (void *)recv);
    }
    /* is it in the method cache? */
    ent = cache + EXPR1(klass, mid);

    if (ent->mid == mid && ent->klass == klass) {
	if (!ent->method)
	    return method_missing(recv, mid, argc, argv,
				  scope == 2 ? NOEX_VCALL : 0);
	id = ent->mid0;
	noex = ent->method->nd_noex;
	klass = ent->method->nd_clss;
	body = ent->method->nd_body;
    }
    else if ((method = rb_get_method_body(klass, id, &id)) != 0) {
	noex = method->nd_noex;
	klass = method->nd_clss;
	body = method->nd_body;
    }
    else {
	if (scope == 3) {
	    return method_missing(recv, mid, argc, argv, NOEX_SUPER);
	}
	return method_missing(recv, mid, argc, argv,
			      scope == 2 ? NOEX_VCALL : 0);
    }
    

    if (mid != idMethodMissing) {
	/* receiver specified form for private method */
	if (UNLIKELY(noex)) {
	    if (((noex & NOEX_MASK) & NOEX_PRIVATE) && scope == 0) {
		return method_missing(recv, mid, argc, argv, NOEX_PRIVATE);
	    }

	    /* self must be kind of a specified form for protected method */
	    if (((noex & NOEX_MASK) & NOEX_PROTECTED) && scope == 0) {
		VALUE defined_class = klass;
		
		if (TYPE(defined_class) == T_ICLASS) {
		    defined_class = RBASIC(defined_class)->klass;
		}

		if (self == Qundef) {
		    self = th->cfp->self;
		}
		if (!rb_obj_is_kind_of(self, rb_class_real(defined_class))) {
		    return method_missing(recv, mid, argc, argv, NOEX_PROTECTED);
		}
	    }

	    if (NOEX_SAFE(noex) > th->safe_level) {
		rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(mid));
	    }
	}
    }

    stack_check();
    return vm_call0(th, klass, recv, mid, id, argc, argv, body, noex & NOEX_NOSUPER);
}
Example #21
int main()
{
    int sizestr = 1, size = 0, comdat = 0, j = 0, i = 0;
    mystack_type buf1 = 0, buf2 = 0, buf3 = 0;


    int* strcommand = (int*)calloc(j + 1,sizeof(int));
    assert(strcommand);
    FILE *product = 0;
    assembler();                            // Translate the commands from text into numbers
    product = fopen("Product.txt","r");
    /*
        Open the file, read the commands and numbers from it,
        and put them all into an array.
        Split the input data into the arrays of labels, commands and numbers.
        Execute the program.
    */

    while(!feof(product))                   // Read the commands
    {
        fscanf(product, "%d", &strcommand[j]);
        j++;
        strcommand = (int*)realloc(strcommand, sizeof(int)*(j + 1));
    }

    fclose(product);

    mystack stk = stack_construct(1);       // Working stack
    mystack call_ret = stack_construct(1);  // Call stack


    // Execute the command stream: some opcodes consume the following word as an operand.
    for(i = 0; strcommand[i] && i <= j; i++)
    {
        if(strcommand[i] < 0)
            continue;
        switch (strcommand[i])
        {

            case 0: return 0;break;             //End

            case 1:                             //Push
                i++;
                stack_push(stk, strcommand[i]);
                break;

            case 2:                             //Pop
                stack_pop(stk);
                break;

            case 3:                             //Add
                stack_push(stk, stack_pop(stk) + stack_pop(stk));
                break;

            case 4:                             //Jump
                    i++;
                    i = strcommand[i] - 1;
                break;

            case 5:                             //Mul
                stack_push(stk, stack_pop(stk) * stack_pop(stk));
                break;

            case 6:                             //Sub
                stack_push(stk, -stack_pop(stk) + stack_pop(stk));
                break;

            case 7:                             //Div
                buf1 = stack_pop(stk);
                buf2 = stack_pop(stk);
                stack_push(stk, buf2 / buf1);
                break;
            case 8:                             //Push_Ax
                Ax = stack_top(stk);
                break;
            case 9:                             //Push_Bx
                Bx = stack_top(stk);
                break;
            case 10:                            //Push_Cx
                Cx = stack_top(stk);
                break;
            case 11:                            //Push_Dx
                Dx = stack_top(stk);
                break;
            case 12:                            //Call
                i++;
                stack_push(call_ret, i);
                i = strcommand[i] - 1;
                break;
            case 13:                            //Ret
                if(stack_check(stk) != 0)
                    i = stack_pop(call_ret);
                break;
            case 14:                            //Jnz
                i++;
                if(stack_top(stk) != 0)
                    i = strcommand[i] - 1;
                break;
            case 15:                            //Jz
                i++;
                if(stack_top(stk) == 0)
                    i = strcommand[i] - 1;
                break;

            case 16:                            //Cmp if (last > penultimate) := 1 if (last < penultimate) :=  0
                buf1 = stack_pop(stk);
                if(buf1 > stack_top(stk))
                    buf3 = 1;
                if(buf1 < stack_top(stk))
                    buf3 = 0;
                stack_push(stk, buf1);
                stack_push(stk, buf3);
                buf3 = 0;
                break;

            case 17:                            //Je JumpIF==
                i++;
                buf1 = stack_pop(stk);
                if(buf1 == stack_top(stk))
                    buf3 = 0;
                else
                    buf3 = -1;
                stack_push(stk, buf1);
                if(!buf3)
                    i = strcommand[i] - 1;
                buf3 = 0;
                break;

            case 18:                            //Jg last >
                i++;
                buf1 = stack_pop(stk);
                if(buf1 > stack_top(stk))
                    buf3 = 0;
                else
                    buf3 = -1;
                stack_push(stk, buf1);
                if(!buf3)
                    i = strcommand[i] - 1;
                buf3 = 0;
                break;

            case 19:                            //Jl last <
                i++;
                buf1 = stack_pop(stk);
                if(buf1 < stack_top(stk))
                    buf3 = 0;
                else
                    buf3 = -1;
                stack_push(stk, buf1);
                if(!buf3)
                    i = strcommand[i] - 1;
                buf3 = 0;
                break;

            case 20:                            //Jng last >=
                i++;
                buf1 = stack_pop(stk);
                if(buf1 >= stack_top(stk))
                    buf3 = 0;
                else
                    buf3 = -1;
                stack_push(stk, buf1);
                if(!buf3)
                    i = strcommand[i] - 1;
                buf3 = 0;
                break;

            case 21:                            //Jnl last <=
                i++;
                buf1 = stack_pop(stk);
                if(buf1 <= stack_top(stk))
                    buf3 = 0;
                else
                    buf3 = -1;
                stack_push(stk, buf1);
                if(!buf3)
                    i = strcommand[i] - 1;
                buf3 = 0;
                break;

            case 22:                            //Sqr
                buf1 = stack_pop(stk);
                stack_push(stk, buf1 * buf1);
                break;

            case 23:                            //Sqrt
                buf1 = stack_pop(stk);
                buf1 = (mystack_type)sqrt(buf1);
                stack_push(stk, buf1);

                break;


        }

    }
    proc_dump(stk);
    stack_destruct(stk);
    stack_destruct(call_ret);
    free(strcommand);
    return 0;
}