RISC: Recoding Infrastructure for SystemC v0.6.2, derived from the Accellera SystemC 2.3.1
proof-of-concept library.
File: sc_thread_process.h
(Doxygen-generated source listing of this file.)
1 /*****************************************************************************
2 
3  The following code is derived, directly or indirectly, from the SystemC
4  source code Copyright (c) 1996-2014 by all Contributors.
5  All Rights reserved.
6 
7  The contents of this file are subject to the restrictions and limitations
8  set forth in the SystemC Open Source License (the "License");
9  You may not use this file except in compliance with such restrictions and
10  limitations. You may obtain instructions on how to receive a copy of the
11  License at http://www.accellera.org/. Software distributed by Contributors
12  under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF
13  ANY KIND, either express or implied. See the License for the specific
14  language governing rights and limitations under the License.
15 
16  *****************************************************************************/
17 
18 /*****************************************************************************
19 
20  sc_thread_process.h -- Thread process declarations
21 
22  Original Author: Andy Goodrich, Forte Design Systems, 4 August 2005
23 
24 
25  CHANGE LOG AT THE END OF THE FILE
26  *****************************************************************************/
27 
28 
29 #if !defined(sc_thread_process_h_INCLUDED)
30 #define sc_thread_process_h_INCLUDED
31 
33 #include "sysc/kernel/sc_process.h"
34 #include "sysc/kernel/sc_cor.h"
35 #include "sysc/kernel/sc_event.h"
36 #include "sysc/kernel/sc_except.h"
37 #include "sysc/kernel/sc_reset.h"
38 // DEBUGGING MACROS:
39 //
40 // DEBUG_MSG(NAME,P,MSG)
41 // MSG = message to print
42 // NAME = name that must match the process for the message to print, or
43 // null if the message should be printed unconditionally.
44 // P = pointer to process message is for, or NULL in which case the
45 // message will not print.
46 #if 0
47 # define DEBUG_NAME ""
48 # define DEBUG_MSG(NAME,P,MSG) \
49  { \
50  if ( P && ( (strlen(NAME)==0) || !strcmp(NAME,P->name())) ) \
51  std::cout << "**** " << sc_time_stamp() << " (" \
52  << sc_get_current_process_name() << "): " << MSG \
53  << " - " << P->name() << std::endl; \
54  }
55 #else
56 # define DEBUG_MSG(NAME,P,MSG)
57 #endif
58 
59 // 02/22/2016 ZC: to enable verbose display or not
60 #ifndef _SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR
61 #define _SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR "SYSC_PRINT_VERBOSE_MESSAGE"
62 #endif
63 namespace sc_core {
64 
65 // forward references:
66 class sc_event_and_list;
67 class sc_event_or_list;
68 class sc_reset;
69 void sc_thread_cor_fn( void* );
70 void sc_set_stack_size( sc_thread_handle, std::size_t );
71 class sc_event;
72 class sc_join;
73 class sc_module;
74 class sc_process_handle;
75 class sc_process_table;
76 class sc_simcontext;
77 class sc_runnable;
78 
79 class Invoker; //DM 05/16/2019
80 
81 sc_cor* get_cor_pointer( sc_process_b* process_p );
82 void sc_set_stack_size( sc_thread_handle thread_h, std::size_t size );
83 void wait( int , sc_simcontext* );
84 void wait( const sc_event&, int , sc_simcontext* );
85 void wait( const sc_event_or_list&, int , sc_simcontext* );
86 void wait( const sc_event_and_list&, int , sc_simcontext* );
87 void wait( const sc_time&, int , sc_simcontext* );
88 void wait( const sc_time&, const sc_event&, int , sc_simcontext* );
89 void wait( const sc_time&, const sc_event_or_list&, int , sc_simcontext* );
90 void wait( const sc_time&, const sc_event_and_list&, int , sc_simcontext* );
91 
92 /**************************************************************************/
98  friend void sc_thread_cor_fn( void* );
99  friend void sc_set_stack_size( sc_thread_handle, std::size_t );
100  friend class sc_event;
101  friend class sc_join;
102  friend class sc_module;
103 
104  // 04/07/2015 GL: a new sc_channel class is derived from sc_module
105  friend class sc_channel;
106 
107  friend class sc_process_b;
108  friend class sc_process_handle;
109  friend class sc_process_table;
110  friend class sc_simcontext;
111  friend class sc_runnable;
112  friend sc_cor* get_cor_pointer( sc_process_b* process_p );
113 
114  friend class Invoker; //DM 05/16/2019
115 
116  // 06/12/2015 GL: modified for the OoO simulation
117  friend void wait( int , sc_simcontext* );
118  friend void wait( const sc_event&, int , sc_simcontext* );
119  friend void wait( const sc_event_or_list&, int , sc_simcontext* );
120  friend void wait( const sc_event_and_list&, int , sc_simcontext* );
121  friend void wait( const sc_time&, int , sc_simcontext* );
122  friend void wait( const sc_time&, const sc_event&, int , sc_simcontext* );
123  friend void wait( const sc_time&, const sc_event_or_list&, int ,
124  sc_simcontext* );
125  friend void wait( const sc_time&, const sc_event_and_list&, int ,
126  sc_simcontext*);
127 
128  public:
129 
132  sc_thread_process( const char* name_p, bool free_host,
133  SC_ENTRY_FUNC method_p, sc_process_host* host_p,
134  const sc_spawn_options* opt_p );
135 
136  virtual const char* kind() const
137  { return "sc_thread_process"; }
138 
139  void aux_boundary();
140  protected:
141  // may not be deleted manually (called from sc_process_b)
142  virtual ~sc_thread_process();
143 
144  virtual void disable_process(
146  virtual void enable_process(
148  virtual void kill_process(
152  virtual void prepare_for_simulation();
153  virtual void resume_process(
155  void set_next_exist( sc_thread_handle next_p );
156  void set_next_runnable( sc_thread_handle next_p );
157 
158  void set_stack_size( std::size_t size );
159  inline void suspend_me();
160  virtual void suspend_process(
162  virtual void throw_reset( bool async );
163  virtual void throw_user( const sc_throw_it_helper& helper,
165 
166  bool trigger_dynamic( sc_event*, bool& );
167  bool deliver_event_at_time( sc_event* e, sc_timestamp e_delivery_time );
168 
173  // 08/14/2015 GL: add a new parameter to update the local time stamp
174  //inline void trigger_static();
175  inline void trigger_static( sc_event* );
176 
177 
178 
183  // 06/12/2015 GL: modified for the OoO simulation
184  void wait( const sc_event&, int = -1 );
185 
190  // 06/12/2015 GL: modified for the OoO simulation
191  void wait( const sc_event_or_list&, int = -1 );
192 
197  // 06/12/2015 GL: modified for the OoO simulation
198  void wait( const sc_event_and_list&, int = -1 );
199 
204  // 06/12/2015 GL: modified for the OoO simulation
205  void wait( const sc_time&, int = -1 );
206 
211  // 06/12/2015 GL: modified for the OoO simulation
212  void wait( const sc_time&, const sc_event&, int = -1 );
213 
218  // 06/12/2015 GL: modified for the OoO simulation
219  void wait( const sc_time&, const sc_event_or_list&, int = -1 );
220 
225  // 06/12/2015 GL: modified for the OoO simulation
226  void wait( const sc_time&, const sc_event_and_list&, int = -1 );
227 
232  // 06/12/2015 GL: modified for the OoO simulation
233  void wait_cycles( int, int n=1 );
234 
235  protected:
236  void add_monitor( sc_process_monitor* monitor_p );
237  void remove_monitor( sc_process_monitor* monitor_p);
238  void signal_monitors( int type = 0 );
239 
240  protected:
241  sc_cor* m_cor_p; // Thread's coroutine.
242  std::vector<sc_process_monitor*> m_monitor_q; // Thread monitors.
243  std::size_t m_stack_size; // Thread stack size.
244  int m_wait_cycle_n; // # of waits to be done.
245 
246  private: // disabled
248  const sc_thread_process& operator = ( const sc_thread_process& );
249 
250 //DM 07/31/2019 experiment
//DM 07/31/2019 experiment
// Hash functor for std::pair keys in unordered associative containers.
struct pairhash {
public:
    template <typename T, typename U>
    std::size_t operator()(const std::pair<T, U> &x) const
    {
        // Combine the two element hashes asymmetrically (boost::hash_combine
        // style). The previous plain XOR combiner mapped (a,b) and (b,a) to
        // the same value, and any pair of equal elements to 0.
        const std::size_t h1 = std::hash<T>()(x.first);
        const std::size_t h2 = std::hash<U>()(x.second);
        return h1 ^ (h2 + 0x9e3779b9u + (h1 << 6) + (h1 >> 2));
    }
};
259 
260  //std::unordered_set<std::pair<int,int>,pairhash > dependency_checked_segs;
261  std::unordered_set<int> dependency_checked_segs;
262 
263  std::unordered_map<int,std::vector<sc_method_process*> > m_contingent_methods;
264  //std::unordered_map<int,std::unordered_map<std::pair<double, long long>,std::unordered_set<sc_thread_process*>,pairhash > > m_contingent_threads;
265 
266  std::unordered_map<int,std::unordered_set<sc_thread_process*> > m_contingent_threads;
267 
268  std::unordered_map<int,std::unordered_set<sc_method_process*> > dependent_methods;
269  std::unordered_map<int,std::unordered_set<sc_thread_process*> > dependent_threads;
270 
271  //std::map<sc_timestamp,std::unordered_set<sc_thread_process*> > dependent_threads;
272 
273  std::unordered_map<int, std::unordered_set<sc_method_process*> > combined_data_conflict_methods;
274  std::unordered_map<int, std::unordered_set<sc_thread_process*> > combined_data_conflict_threads;
275 
276 
277 };
278 
279 //------------------------------------------------------------------------------
280 //"sc_thread_process::set_stack_size"
281 //
282 //------------------------------------------------------------------------------
283 inline void sc_thread_process::set_stack_size( std::size_t size )
284 {
285  assert( size );
286  m_stack_size = size;
287 }
288 
289 //------------------------------------------------------------------------------
290 //"sc_thread_process::suspend_me"
291 //
292 // This method suspends this object instance in favor of the next runnable
293 // process. Upon awakening we check to see if an exception should be thrown.
294 // There are two types of exceptions that can be thrown, synchronous reset
295 // and asynchronous reset. At a future time there may be more asynchronous
296 // exceptions. If an asynchronous reset is seen and there is not static reset
297 // specified, or the static reset is not active then clear the throw
298 // type for the next time this method is called.
299 //
300 // Notes:
301 // (1) For an explanation of how the reset mechanism works see the top of
302 // the file sc_reset.cpp.
303 // (2) The m_sticky_reset field is used to handle synchronous resets that
304 // are enabled via the sc_process_handle::sync_reset_on() method. These
305 // resets are not generated by a signal, but rather are modal by
306 // method call: sync_reset_on() - sync_reset_off().
307 //------------------------------------------------------------------------------
// NOTE(review): doxygen extraction lost line 308, the signature line of this
// definition. Per the banner above and the in-class declaration
// 'inline void suspend_me();' it is 'inline void sc_thread_process::suspend_me()'
// — confirm against upstream sources.
309 {
310  // 11/21/2014 GL: assume we have acquired the kernel lock upon here
311 #ifdef SC_LOCK_CHECK
312  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
313 #endif /* SC_LOCK_CHECK */
314 
315  // remember, if we're currently unwinding
316 
317  bool unwinding_preempted = m_unwinding;
318 
319  sc_simcontext* simc_p = simcontext();
320  //sc_cor* cor_p = simc_p->next_cor();
// This thread hands itself to the kernel: queued as newly waiting and
// removed from the running set.
321  simc_p->new_waiting_proc.push_back(this);
322  simc_p->remove_running_process( (sc_process_b*)this );
323 
324  //the following code should not be here, because suspend_me only removes
325  //a process from running queue
326  //put it into waiting queue and set its state 10:23 2017/3/10 ZC
327  //simc_p->add_to_wait_queue( (sc_process_b*)this );
328  //this->m_process_state=12;
329 
330  //printf("calling oooschedule() from process %s",this->process_name);
331  // simc_p->schedule( m_cor_p );
332  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
333  // printf("%s is calling suspend_me()\n",this->name());
334  /*DM 07/24/2019*/
335  simc_p->update_dependency_set();
336 
// Retire this thread's pending kernel request under the request mutex; only
// the last requester (count was 1) invokes the OoO scheduler.
337  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
338  int cur_kernel_requests = sc_get_curr_simcontext()->num_kernel_requests;
339  sc_get_curr_simcontext()->num_kernel_requests--;
340  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
341 
342  if(cur_kernel_requests == 1) {
343  simc_p->oooschedule( m_cor_p ); // 08/19/2015 GL: OoO scheduling
344  }
345 
346  // do not switch, if we're about to execute next (e.g. suicide)
347 
348  //if( m_cor_p != cor_p )
349  //{
350  // DEBUG_MSG( DEBUG_NAME , this, "suspending thread");
351  // simc_p->cor_pkg()->yield( cor_p );
352  // DEBUG_MSG( DEBUG_NAME , this, "resuming thread");
353  //}
354 
355  // if I am not scheduled to execute again
356  if ( !simc_p->is_running_process( (sc_process_b*)this ) )
357  {
358  DEBUG_MSG( DEBUG_NAME , this, "suspending thread");
359  simc_p->suspend_cor( m_cor_p );
360  DEBUG_MSG( DEBUG_NAME , this, "resuming thread");
361  }
362 
363  // IF THERE IS A THROW TO BE DONE FOR THIS PROCESS DO IT NOW:
364  //
365  // (1) Optimize THROW_NONE for speed as it is the normal case.
366  // (2) If this thread is already unwinding then suspend_me() was
367  // called from the catch clause to throw an exception on another
368  // process, so just go back to the catch clause.
369 
370  if ( m_throw_status == THROW_NONE ) return;
371 
372  if ( m_unwinding ) return;
373 
374  switch( m_throw_status )
375  {
376  case THROW_ASYNC_RESET:
377  case THROW_SYNC_RESET:
378  DEBUG_MSG( DEBUG_NAME , this, "throwing reset for");
// NOTE(review): line 379 lost in extraction — in Accellera 2.3.1 it is
// 'if ( m_reset_event_p ) m_reset_event_p->notify();' — confirm upstream.
380  throw sc_unwind_exception( this, true );
381 
382  case THROW_USER:
383  DEBUG_MSG( DEBUG_NAME, this, "invoking throw_it for");
// NOTE(review): lines 384-385 and 387 lost in extraction — presumably the
// recomputation of m_throw_status (a conditional expression ending in
// 'THROW_NONE);') followed by the throw-helper invocation, as in Accellera
// 2.3.1 — confirm against upstream.
386  THROW_NONE);
388  break;
389 
390  case THROW_KILL:
391  DEBUG_MSG( DEBUG_NAME, this, "throwing kill for");
392  throw sc_unwind_exception( this, false );
393 
394  default: // THROWING_NOW
395  sc_assert( unwinding_preempted );
396  DEBUG_MSG( DEBUG_NAME, this, "restarting thread");
397  break;
398  }
399 }
400 
401 
402 //------------------------------------------------------------------------------
403 //"sc_thread_process::wait"
404 //
405 // Notes:
406 // (1) The correct order to lock and unlock channel locks (to avoid deadlocks
407 // and races) for SystemC functions with context switch:
408 //
409 // outer_channel.lock_and_push
410 // [outer channel work]
411 // inner_channel.lock_and_push
412 // [inner channel work]
413 // +----------------------------------WAIT----------------------------------+
414 // | +------------------------Simulation Kernel------------------------+ |
415 // | | acquire kernel lock | |
416 // | | +------unlock_all_channels-----+ | |
417 // | | | inner_channel.unlock | | |
418 // | | | outer_channel.unlock | | |
419 // | | +------------------------------+ | |
420 // | | [kernel work] | |
421 // | | pthread_cond_wait: release kernel lock | |
422 // | | [sleep] | |
423 // | | pthread_cond_wait (upon wakeup): acquire kernel lock | |
424 // | | [kernel work] | |
425 // | | release kernel lock | |
426 // | +-----------------------------------------------------------------+ |
427 // | [no lock/no work] |
428 // | +------lock_all_channels-------+ |
429 // | | outer_channel.lock | |
430 // | | inner_channel.lock | |
431 // | +------------------------------+ |
432 // +------------------------------------------------------------------------+
433 // [inner channel work]
434 // inner_channel.pop_and_unlock
435 // [outer channel work]
436 // outer_channel.pop_and_unlock
437 //
438 // (2) If we did not consider immediate notification, a more cleaner locking
439 // order should be:
440 //
441 // outer_channel.lock_and_push
442 // [outer channel work]
443 // inner_channel.lock_and_push
444 // [inner channel work]
445 // +----------------------------------WAIT----------------------------------+
446 // | +----unlock_all_channels----+ |
447 // | | inner_channel.unlock | |
448 // | | outer_channel.unlock | |
449 // | +---------------------------+ |
450 // | [no lock/no work] |
451 // | +-------------------------Simulation Kernel-------------------------+ |
452 // | | acquire kernel lock | |
453 // | | [kernel work] | |
454 // | | pthread_cond_wait: release kernel lock | |
455 // | | [sleep] | |
456 // | | pthread_cond_wait (upon wakeup): acquire kernel lock | |
457 // | | [kernel work] | |
458 // | | release kernel lock | |
459 // | +-------------------------------------------------------------------+ |
460 // | [no lock/no work] |
461 // | +-----lock_all_channels-----+ |
462 // | | outer_channel.lock | |
463 // | | inner_channel.lock | |
464 // | +---------------------------+ |
465 // +------------------------------------------------------------------------+
466 // [inner channel work]
467 // inner_channel.pop_and_unlock
468 // [outer channel work]
469 // outer_channel.pop_and_unlock
470 //
471 // (3) When acquiring the channel locks, we may encounter the same lock for
472 // several times (a channel method calls another one in the same channel).
473 // But we should encounter them one right after another, so the same lock
474 // is at the end of the lock queue. If we encounter a lock that is in the
475 // middle of the queue, then it is a bad coding style and simulation may
476 // break (meaning an inner channel method calls an outer channel method).
477 //
478 // (4) For more information, please refer to the following files:
479 // sc_method_process.h: 184 (sc_method_process::next_trigger)
480 // sc_event.cpp: 79 (sc_event::notify)
481 //
482 // (02/19/2015 GL)
483 //------------------------------------------------------------------------------
484 inline
485 void
// NOTE(review): line 486, this definition's signature, was lost in the doxygen
// extraction. The banner above says "sc_thread_process::wait", but the body
// registers no event and sets the sentinel segment ID -2, so it may instead be
// 'sc_thread_process::aux_boundary()' (declared public above) — confirm
// against upstream sources before editing.
487 {
488 
489 
490 
491 
492  if( m_unwinding )
493  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
494 
495  {
496  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
497  sc_kernel_lock lock;
498 
499 #ifdef SC_LOCK_CHECK
500  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
501 #endif /* SC_LOCK_CHECK */
502 
503  // 08/14/2015 GL: set the new segment ID of this thread
504  set_segment_id( -2 );
505 
506  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
507 
508 
509  //ZC 9:06 2017/3/14
510  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
511  // printf("\n%s is calling wait for nothing\n",this->name());
512 
513 
// NOTE(review): line 514 lost in extraction — presumably 'suspend_me();',
// matching every sibling wait variant — confirm upstream.
515  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
516  }
517 #ifdef SC_LOCK_CHECK
518  assert( sc_get_curr_simcontext()->is_not_owner() );
519 #endif /* SC_LOCK_CHECK */
520  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
521 
522 
523 
524 }
525 
526 inline
527 void
528 sc_thread_process::wait( const sc_event& e, int seg_id )
529 {
530 
531 
532  if( m_unwinding )
533  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
534 
535  {
536  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
537  /*DM 07/24/2019 */
538  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
539  sc_get_curr_simcontext()->num_kernel_requests++;
540  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
541 
542  sc_kernel_lock lock;
543 
544 #ifdef SC_LOCK_CHECK
545  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
546 #endif /* SC_LOCK_CHECK */
547 
548  // 08/14/2015 GL: set the new segment ID of this thread
549  set_segment_id( seg_id );
550 
551  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
552  m_event_p = &e; // for cleanup.
553  e.add_dynamic( this );
554 
555 
557 
558  //ZC 9:06 2017/3/14
559  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
560  // printf("\n%s is calling wait for event %s\n",this->name(),e.name());
561  this->m_process_state=2;
562  sc_get_curr_simcontext()->add_to_wait_queue( (sc_process_b*)this ) ;
563 
564 
565 
566  suspend_me();
567  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
568  }
569 #ifdef SC_LOCK_CHECK
570  assert( sc_get_curr_simcontext()->is_not_owner() );
571 #endif /* SC_LOCK_CHECK */
572  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
573 }
574 
575 inline
576 void
578 {
579 
580  if( m_unwinding )
581  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
582 
583  {
584  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
585  /*DM 07/24/2019 */
586  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
587  sc_get_curr_simcontext()->num_kernel_requests++;
588  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
589 
590  sc_kernel_lock lock;
591 
592 #ifdef SC_LOCK_CHECK
593  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
594 #endif /* SC_LOCK_CHECK */
595 
596  // 08/14/2015 GL: set the new segment ID of this thread
597  set_segment_id( seg_id );
598 
599  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
600  el.add_dynamic( this );
601  m_event_list_p = &el;
602  m_event_count = el.size(); //ZC 2018/7/9
603  //std::cout << "wait for or_list m_event_count = " << m_event_count << std::endl;
605 
606  //ZC 19:59 2018/7/9 copied for the andlist downward
607  //I dont know why I didnt do this before
608  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
609  // printf("\n%s is calling wait for event list\n",this->name());
610  this->m_process_state=2;
611  sc_get_curr_simcontext()->add_to_wait_queue( (sc_process_b*)this ) ;
612 
613 
614  suspend_me();
615  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
616  }
617 #ifdef SC_LOCK_CHECK
618  assert( sc_get_curr_simcontext()->is_not_owner() );
619 #endif /* SC_LOCK_CHECK */
620  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
621 }
622 
623 inline
624 void
626 {
627  if( m_unwinding )
628  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
629 
630  {
631  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
632  /*DM 07/24/2019 */
633  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
634  sc_get_curr_simcontext()->num_kernel_requests++;
635  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
636 
637 
638  sc_kernel_lock lock;
639 
640 #ifdef SC_LOCK_CHECK
641  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
642 #endif /* SC_LOCK_CHECK */
643 
644  // 08/14/2015 GL: set the new segment ID of this thread
645  set_segment_id( seg_id );
646 
647  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
648  el.add_dynamic( this );
649  m_event_list_p = &el;
650  m_event_count = el.size();
651  //this->m_process_state=7;
652  //std::cout << "wait for and_list m_event_count = " << m_event_count << std::endl;
654 
655  //ZC 9:06 2017/3/14
656  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
657  // printf("\n%s is calling wait for event list\n",this->name());
658  this->m_process_state=2;
659  sc_get_curr_simcontext()->add_to_wait_queue( (sc_process_b*)this ) ;
660 
661 
662  suspend_me();
663  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
664  }
665 #ifdef SC_LOCK_CHECK
666  assert( sc_get_curr_simcontext()->is_not_owner() );
667 #endif /* SC_LOCK_CHECK */
668  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
669 }
670 
671 inline
672 void
673 sc_thread_process::wait( const sc_time& t, int seg_id )
674 {
675  if( m_unwinding )
676  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
677 
678  {
679  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
680  /*DM 07/24/2019 */
681  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
682  sc_get_curr_simcontext()->num_kernel_requests++;
683  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
684 
685  sc_kernel_lock lock;
686 
687 #ifdef SC_LOCK_CHECK
688  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
689 #endif /* SC_LOCK_CHECK */
690 
691  // 08/14/2015 GL: set the new segment ID of this thread
692  set_segment_id( seg_id );
693 
694  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
695  m_timeout_event_p->notify_internal( t );
696  m_timeout_event_p->add_dynamic( this );
697 
698  if(t==SC_ZERO_TIME){
699  this->m_process_state=3;
700  //sc_get_curr_simcontext()->add_to_wait_queue( (sc_process_b*)this ) ;
701  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
702  // printf("\n%s is calling wait for delta time\n",this->name());
703  }
704  else {
705  this->m_process_state=3; //ZC
706  // if(getenv(_SYSC_PRINT_VERBOSE_MESSAGE_ENV_VAR))
707  // printf("\n%s is calling wait for time\n",this->name());
708  }
710 
711  suspend_me();
712  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
713  }
714 #ifdef SC_LOCK_CHECK
715  assert( sc_get_curr_simcontext()->is_not_owner() );
716 #endif /* SC_LOCK_CHECK */
717  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
718 }
719 
720 inline
721 void
722 sc_thread_process::wait( const sc_time& t, const sc_event& e, int seg_id )
723 {
724  if( m_unwinding )
725  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
726 
727  {
728  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
729  /*DM 07/24/2019 */
730  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
731  sc_get_curr_simcontext()->num_kernel_requests++;
732  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
733 
734  sc_kernel_lock lock;
735 
736 #ifdef SC_LOCK_CHECK
737  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
738 #endif /* SC_LOCK_CHECK */
739 
740  // 08/14/2015 GL: set the new segment ID of this thread
741  set_segment_id( seg_id );
742 
743  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
744  m_timeout_event_p->notify_internal( t );
745  m_timeout_event_p->add_dynamic( this );
746  e.add_dynamic( this );
747  m_event_p = &e;
749  suspend_me();
750  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
751  }
752 #ifdef SC_LOCK_CHECK
753  assert( sc_get_curr_simcontext()->is_not_owner() );
754 #endif /* SC_LOCK_CHECK */
755  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
756 }
757 
758 inline
759 void
760 sc_thread_process::wait( const sc_time& t, const sc_event_or_list& el, int seg_id )
761 {
762  if( m_unwinding )
763  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
764 
765  {
766  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
767  /*DM 07/24/2019 */
768  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
769  sc_get_curr_simcontext()->num_kernel_requests++;
770  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
771 
772  sc_kernel_lock lock;
773 
774 #ifdef SC_LOCK_CHECK
775  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
776 #endif /* SC_LOCK_CHECK */
777 
778  // 08/14/2015 GL: set the new segment ID of this thread
779  set_segment_id( seg_id );
780 
781  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
782  m_timeout_event_p->notify_internal( t );
783  m_timeout_event_p->add_dynamic( this );
784  el.add_dynamic( this );
785  m_event_list_p = &el;
787  suspend_me();
788  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
789  }
790 #ifdef SC_LOCK_CHECK
791  assert( sc_get_curr_simcontext()->is_not_owner() );
792 #endif /* SC_LOCK_CHECK */
793  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
794 }
795 
796 inline
797 void
798 sc_thread_process::wait( const sc_time& t, const sc_event_and_list& el, int seg_id )
799 {
800  if( m_unwinding )
801  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
802 
803  {
804  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
805  /*DM 07/24/2019 */
806  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
807  sc_get_curr_simcontext()->num_kernel_requests++;
808  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
809 
810 
811  sc_kernel_lock lock;
812 
813 #ifdef SC_LOCK_CHECK
814  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
815 #endif /* SC_LOCK_CHECK */
816 
817  // 08/14/2015 GL: set the new segment ID of this thread
818  set_segment_id( seg_id );
819 
820  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
821  m_timeout_event_p->notify_internal( t );
822  m_timeout_event_p->add_dynamic( this );
823  el.add_dynamic( this );
824  m_event_list_p = &el;
825  m_event_count = el.size();
827  suspend_me();
828  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
829  }
830 #ifdef SC_LOCK_CHECK
831  assert( sc_get_curr_simcontext()->is_not_owner() );
832 #endif /* SC_LOCK_CHECK */
833  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
834 }
835 
836 //------------------------------------------------------------------------------
837 //"sc_thread_process::wait_cycles"
838 //
839 // This method suspends this object instance for the specified number of cycles.
840 // A cycle is defined as the event the thread is set up to staticly wait on.
841 // The field m_wait_cycle_n is set to one less than the number of cycles to
842 // be waited for, since the value is tested before being decremented in
843 // the simulation kernel.
844 //------------------------------------------------------------------------------
845 inline
846 void
847 sc_thread_process::wait_cycles( int seg_id, int n )
848 {
849  if( m_unwinding )
850  SC_REPORT_ERROR( SC_ID_WAIT_DURING_UNWINDING_, name() );
851 
852  {
853  // 05/25/2015 GL: sc_kernel_lock constructor acquires the kernel lock
854  /*DM 07/24/2019 */
855  pthread_mutex_lock( &sc_get_curr_simcontext()->kernel_request_mutex );
856  sc_get_curr_simcontext()->num_kernel_requests++;
857  pthread_mutex_unlock( &sc_get_curr_simcontext()->kernel_request_mutex );
858 
859 
860  sc_kernel_lock lock;
861 
862 #ifdef SC_LOCK_CHECK
863  assert( sc_get_curr_simcontext()->is_locked_and_owner() );
864 #endif /* SC_LOCK_CHECK */
865 
866  // 08/14/2015 GL: set the new segment ID of this thread
867  set_segment_id( seg_id );
868 
869  unlock_all_channels(); // 02/16/2015 GL: release all the channel locks
870  m_wait_cycle_n = n-1;
871  suspend_me();
872  // 05/25/2015 GL: sc_kernel_lock destructor releases the kernel lock
873  }
874 #ifdef SC_LOCK_CHECK
875  assert( sc_get_curr_simcontext()->is_not_owner() );
876 #endif /* SC_LOCK_CHECK */
877  lock_all_channels(); // 02/16/2015 GL: acquire all the channel locks
878 }
879 
880 //------------------------------------------------------------------------------
881 //"sc_thread_process::miscellaneous support"
882 //
883 //------------------------------------------------------------------------------
884 inline
886 {
887  m_monitor_q.push_back(monitor_p);
888 }
889 
890 
891 inline
893 {
894  int mon_n = m_monitor_q.size();
895 
896  for ( int mon_i = 0; mon_i < mon_n; mon_i++ )
897  {
898  if ( m_monitor_q[mon_i] == monitor_p )
899  {
900  m_monitor_q[mon_i] = m_monitor_q[mon_n-1];
901  m_monitor_q.resize(mon_n-1);
902  }
903  }
904 }
905 
906 inline
908 {
909  m_exist_p = next_p;
910 }
911 
912 inline
914 {
915  return (sc_thread_handle)m_exist_p;
916 }
917 
918 inline
920 {
921  m_runnable_p = next_p;
922 }
923 
924 inline
926 {
928 }
929 
930 //------------------------------------------------------------------------------
931 //"sc_thread_process::trigger_static"
932 //
933 // This inline method adds the current thread to the queue of runnable
934 // processes, if required. This is the case if the following criteria
935 // are met:
936 // (1) The process is in a runnable state.
937 // (2) The process is not already on the run queue.
938 // (3) The process is expecting a static trigger,
939 // dynamic event waits take priority.
940 // (4) The process' static wait count is zero.
941 //
942 // If the triggering process is the same process, the trigger is
943 // ignored as well, unless SC_ENABLE_IMMEDIATE_SELF_NOTIFICATIONS
944 // is defined.
945 //------------------------------------------------------------------------------
946 inline
947 void
948 // 08/14/2015 GL: add a new parameter to update the local time stamp
949 //sc_thread_process::trigger_static()
// NOTE(review): line 950, the active signature, was lost in the doxygen
// extraction — per the class declaration 'inline void trigger_static(
// sc_event* );' it is 'sc_thread_process::trigger_static( sc_event* e )';
// 'e' is dereferenced below. Confirm against upstream.
951 {
952  // 05/05/2015 GL: we may or may not have acquired the kernel lock upon here
953  // 1) this function is invoked in sc_simcontext::prepare_to_simulate(),
954  // where the kernel lock is not acquired as it is in the initialization
955  // phase
956  // 2) this function is also invoked in sc_event::notify(), where the kernel
957  // lock is acquired
958 
959  // No need to try queueing this thread if one of the following is true:
960  // (a) its disabled
961  // (b) its already queued for execution
962  // (c) its waiting on a dynamic event
963  // (d) its wait count is not satisfied
964 
965  if ( (m_state & ps_bit_disabled) || is_runnable() ||
// NOTE(review): line 966, the continuation of this condition, was lost in the
// extraction — presumably the dynamic-trigger test (item (c) above, e.g.
// 'm_trigger_type != STATIC )' as in Accellera 2.3.1) — confirm upstream.
967  return;
968 
969 #if ! defined( SC_ENABLE_IMMEDIATE_SELF_NOTIFICATIONS )
970  if( SC_UNLIKELY_( sc_get_current_process_b() == this ) )
971  {
// NOTE(review): line 972 lost in extraction — presumably the report/comment
// for a skipped immediate self-notification — confirm upstream.
973  return;
974  }
975 #endif // SC_ENABLE_IMMEDIATE_SELF_NOTIFICATIONS
976 
977  if ( m_wait_cycle_n > 0 )
978  {
979  --m_wait_cycle_n;
980  return;
981  }
982 
983  // If we get here then the thread is has satisfied its wait criteria, if
984  // its suspended mark its state as ready to run. If its not suspended then
985  // push it onto the runnable queue.
986 
987  if ( m_state & ps_bit_suspended )
988  {
// NOTE(review): line 989 lost in extraction — presumably
// 'm_state = m_state | ps_bit_ready_to_run;' as in Accellera 2.3.1 — confirm.
990  }
991  else
992  {
993  // 12/22/2016 GL: store the current time before updating
994  sc_time curr_time = m_timestamp.get_time_count();
995 
996  // 08/14/2015 GL: update the local time stamp of this thread process
// NOTE(review): line 997 lost in extraction — presumably the definition of
// 'ts', the notifying event's timestamp (used below) — confirm upstream.
998  switch( e->m_notify_type )
999  {
1000  case sc_event::DELTA: // delta notification
1001  if ( ts > m_timestamp ) {
// NOTE(review): line 1002 lost in extraction — presumably the start of a
// 'set_timestamp( sc_timestamp( ts.get_time_count(),' call — confirm.
1003  ts.get_delta_count() + 1 ) );
1004  } else {
// NOTE(review): lines 1005-1006 lost in extraction — presumably the start of
// a set_timestamp call advancing this thread's own delta count — confirm.
1007  + 1 ) );
1008  }
1009  break;
1010  case sc_event::TIMED: // timed notification
1011  set_timestamp( ts );
1012  break;
1013  case sc_event::NONE:
1014  assert( 0 ); // wrong type
1015  }
1016 
1017  simcontext()->push_runnable_thread(this);
1018 
1019  // 12/22/2016 GL: update m_oldest_time in sc_simcontext if necessary
1020  simcontext()->update_oldest_time( curr_time );
1021  }
1022 }
1023 
1024 #undef DEBUG_MSG
1025 #undef DEBUG_NAME
1026 
1027 } // namespace sc_core
1028 
1029 // $Log: sc_thread_process.h,v $
1030 // Revision 1.30 2011/08/26 20:46:11 acg
1031 // Andy Goodrich: moved the modification log to the end of the file to
1032 // eliminate source line number skew when check-ins are done.
1033 //
1034 // Revision 1.29 2011/08/24 23:36:12 acg
1035 // Andy Goodrich: removed break statements that can never be reached and
1036 // which causes warnings in the Greenhills C++ compiler.
1037 //
1038 // Revision 1.28 2011/04/14 22:34:27 acg
1039 // Andy Goodrich: removed dead code.
1040 //
1041 // Revision 1.27 2011/04/13 05:02:18 acg
1042 // Andy Goodrich: added missing check to the wake up code in suspend_me()
1043 // so that we just return if the call to suspend_me() was issued from a
1044 // stack unwinding.
1045 //
1046 // Revision 1.26 2011/04/13 02:44:26 acg
1047 // Andy Goodrich: added m_unwinding flag in place of THROW_NOW because the
1048 // throw status will be set back to THROW_*_RESET if reset is active and
1049 // the check for an unwind being complete was expecting THROW_NONE as the
1050 // clearing of THROW_NOW.
1051 //
1052 // Revision 1.25 2011/04/11 22:05:14 acg
1053 // Andy Goodrich: use the DEBUG_NAME macro in DEBUG_MSG invocations.
1054 //
1055 // Revision 1.24 2011/04/10 22:12:32 acg
1056 // Andy Goodrich: adding debugging macros.
1057 //
1058 // Revision 1.23 2011/04/08 22:41:28 acg
1059 // Andy Goodrich: added comment pointing to the description of the reset
1060 // mechanism in sc_reset.cpp.
1061 //
1062 // Revision 1.22 2011/04/08 18:27:33 acg
1063 // Andy Goodrich: added check to make sure we don't schedule a running process
1064 // because of it issues a notify() it is sensitive to.
1065 //
1066 // Revision 1.21 2011/04/05 06:22:38 acg
1067 // Andy Goodrich: expanded comment for trigger_static() initial vetting.
1068 //
1069 // Revision 1.20 2011/04/01 21:24:57 acg
1070 // Andy Goodrich: removed unused code.
1071 //
1072 // Revision 1.19 2011/02/19 08:30:53 acg
1073 // Andy Goodrich: Moved process queueing into trigger_static from
1074 // sc_event::notify.
1075 //
1076 // Revision 1.18 2011/02/18 20:27:14 acg
1077 // Andy Goodrich: Updated Copyrights.
1078 //
1079 // Revision 1.17 2011/02/17 19:55:58 acg
1080 // Andy Goodrich:
1081 // (1) Changed signature of trigger_dynamic() back to a bool.
1082 // (2) Simplified process control usage.
1083 // (3) Changed trigger_static() to recognize process controls and to
1084 // do the down-count on wait(N), allowing the elimination of
1085 // ready_to_run().
1086 //
1087 // Revision 1.16 2011/02/16 22:37:31 acg
1088 // Andy Goodrich: clean up to remove need for ps_disable_pending.
1089 //
1090 // Revision 1.15 2011/02/13 21:47:38 acg
1091 // Andy Goodrich: update copyright notice.
1092 //
1093 // Revision 1.14 2011/02/13 21:35:54 acg
1094 // Andy Goodrich: added error for performing a wait() during unwinding.
1095 //
1096 // Revision 1.13 2011/02/11 13:25:24 acg
1097 // Andy Goodrich: Philipp A. Hartmann's changes:
1098 // (1) Removal of SC_CTHREAD method overloads.
1099 // (2) New exception processing code.
1100 //
1101 // Revision 1.12 2011/02/01 23:01:53 acg
1102 // Andy Goodrich: removed dead code.
1103 //
1104 // Revision 1.11 2011/02/01 21:18:01 acg
1105 // Andy Goodrich:
1106 // (1) Changes in throw processing for new process control rules.
1107 // (2) Support of new process_state enum values.
1108 //
1109 // Revision 1.10 2011/01/25 20:50:37 acg
1110 // Andy Goodrich: changes for IEEE 1666 2011.
1111 //
1112 // Revision 1.9 2011/01/19 23:21:50 acg
1113 // Andy Goodrich: changes for IEEE 1666 2011
1114 //
1115 // Revision 1.8 2011/01/18 20:10:45 acg
1116 // Andy Goodrich: changes for IEEE1666_2011 semantics.
1117 //
1118 // Revision 1.7 2011/01/06 17:59:58 acg
1119 // Andy Goodrich: removed debugging output.
1120 //
1121 // Revision 1.6 2010/07/22 20:02:33 acg
1122 // Andy Goodrich: bug fixes.
1123 //
1124 // Revision 1.5 2009/07/28 01:10:53 acg
1125 // Andy Goodrich: updates for 2.3 release candidate.
1126 //
1127 // Revision 1.4 2009/05/22 16:06:29 acg
1128 // Andy Goodrich: process control updates.
1129 //
1130 // Revision 1.3 2009/03/12 22:59:58 acg
1131 // Andy Goodrich: updates for 2.4 stuff.
1132 //
1133 // Revision 1.2 2008/05/22 17:06:06 acg
1134 // Andy Goodrich: formatting and comments.
1135 //
1136 // Revision 1.1.1.1 2006/12/15 20:20:05 acg
1137 // SystemC 2.3
1138 //
1139 // Revision 1.7 2006/05/08 17:57:13 acg
1140 // Andy Goodrich: Added David Long's forward declarations for friend functions
1141 // to keep the Microsoft C++ compiler happy.
1142 //
1143 // Revision 1.6 2006/04/20 17:08:17 acg
1144 // Andy Goodrich: 3.0 style process changes.
1145 //
1146 // Revision 1.5 2006/04/11 23:13:21 acg
1147 // Andy Goodrich: Changes for reduced reset support that only includes
1148 // sc_cthread, but has preliminary hooks for expanding to method and thread
1149 // processes also.
1150 //
1151 // Revision 1.4 2006/01/24 20:49:05 acg
1152 // Andy Goodrich: changes to remove the use of deprecated features within the
1153 // simulator, and to issue warning messages when deprecated features are used.
1154 //
1155 // Revision 1.3 2006/01/13 18:44:30 acg
1156 // Added $Log to record CVS changes into the source.
1157 
1158 #endif // !defined(sc_thread_process_h_INCLUDED)
friend void sc_thread_cor_fn(void *)
void report_immediate_self_notification() const
#define sc_assert(expr)
Definition: sc_report.h:235
sc_throw_it_helper * m_throw_helper_p
Definition: sc_process.h:921
sc_event * m_reset_event_p
Definition: sc_process.h:912
sc_process_b * sc_get_current_process_b()
bool deliver_event_at_time(sc_event *e, sc_timestamp e_delivery_time)
virtual void resume_process(sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
void add_monitor(sc_process_monitor *monitor_p)
friend void wait(int, sc_simcontext *)
OR list of events.
Definition: sc_event.h:228
virtual void throw_reset(bool async)
void notify()
The immediate notification is not supported by the out-of-order simulation in the current release...
void set_next_runnable(sc_thread_handle next_p)
A scoped mutex for the kernel lock.
process_throw_type m_throw_status
Definition: sc_process.h:922
#define SC_REPORT_ERROR(msg_type, msg)
Definition: sc_report.h:213
sc_process_b sc_process_b
Definition: sc_process.h:977
A time stamp combining timed cycles and delta cycles.
Definition: sc_process.h:434
const sc_time & get_time_count() const
Get the value of timed cycles.
void oooschedule(sc_cor *cor)
Scheduling function in the OoO simulation.
virtual void prepare_for_simulation()
sc_descendant_inclusion_info
Definition: sc_process.h:136
This class provides access to an sc_process_b object instance in a manner which allows some persisten...
void(sc_process_host::* SC_ENTRY_FUNC)()
Definition: sc_process.h:212
sc_thread_process(const char *name_p, bool free_host, SC_ENTRY_FUNC method_p, sc_process_host *host_p, const sc_spawn_options *opt_p)
AND list of events.
Definition: sc_event.h:193
void signal_monitors(int type=0)
#define SC_UNLIKELY_(x)
Definition: sc_cmnhdr.h:85
sc_event * m_timeout_event_p
Definition: sc_process.h:924
void wait(int, sc_simcontext *)
sc_cor * get_cor_pointer(sc_process_b *process_p)
Coroutine abstract base class.
Definition: sc_cor.h:57
const char * name() const
Definition: sc_object.h:71
sc_simcontext * simcontext() const
Definition: sc_object.h:85
const sc_event * m_event_p
Definition: sc_process.h:898
void sc_thread_cor_fn(void *arg)
int size() const
Definition: sc_event.h:643
void add_dynamic(sc_method_handle) const
User initiated dynamic process support.
Definition: sc_process.h:558
virtual void suspend_process(sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
void trigger_static(sc_event *)
A new parameter is added to update the local time stamp in the thread process.
void set_stack_size(std::size_t size)
void set_next_exist(sc_thread_handle next_p)
Class that manages the ready-to-run queues.
Definition: sc_runnable.h:42
trigger_t m_trigger_type
Definition: sc_process.h:925
sc_thread_handle next_exist()
void lock_all_channels(void)
Acquire all the channel locks.
class sc_thread_process * sc_thread_handle
Definition: sc_process.h:121
sc_process_b * m_exist_p
Definition: sc_process.h:902
const sc_timestamp & get_notify_timestamp() const
GET the notification time stamp.
void wait_cycles(int, int n=1)
A new parameter segment ID is added for the out-of-order simulation.
void remove_running_process(sc_process_b *)
Remove a process from the running queue.
The event class.
Definition: sc_event.h:260
virtual void disable_process(sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
The simulation context.
#define DEBUG_MSG(NAME, P, MSG)
friend class sc_unwind_exception
Definition: sc_process.h:581
Base class for all structural entities.
Definition: sc_module.h:83
void unlock_all_channels(void)
Release all the channel locks.
bool is_running_process(sc_process_b *)
Check whether a process is in the running queue.
sc_timestamp m_timestamp
The local time stamp of this process.
Definition: sc_process.h:944
friend void sc_set_stack_size(sc_thread_handle, std::size_t)
virtual void throw_user(const sc_throw_it_helper &helper, sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
Base class for all hierarchical channels.
Definition: sc_module.h:712
virtual void throw_it()=0
sc_thread_handle next_runnable()
virtual void kill_process(sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
sc_simcontext * sc_get_curr_simcontext()
void set_timestamp(const sc_timestamp &ts)
Get the local time stamp of this process.
void sc_set_stack_size(sc_method_handle, std::size_t)
friend sc_cor * get_cor_pointer(sc_process_b *process_p)
const sc_time SC_ZERO_TIME
This is the base class for objects which may have processes defined for their methods (e...
Definition: sc_process.h:149
void remove_monitor(sc_process_monitor *monitor_p)
bool trigger_dynamic(sc_event *, bool &)
sc_process_b * m_runnable_p
Definition: sc_process.h:914
std::vector< sc_process_monitor * > m_monitor_q
virtual const char * kind() const
value_type get_delta_count() const
Get the value of delta cycles.
bool is_runnable() const
Definition: sc_process.h:1047
virtual void enable_process(sc_descendant_inclusion_info descendants=SC_NO_DESCENDANTS)
void suspend_cor(sc_cor *)
Suspend a coroutine.
void set_segment_id(int id)
Get the current segment ID of this process.
int m_process_state
The name of this process.
Definition: sc_process.h:890
const sc_event_list * m_event_list_p
Definition: sc_process.h:901