2017年02月03日 情報科学類 オペレーティングシステム II 筑波大学 システム情報系 新城 靖 <yas@cs.tsukuba.ac.jp>
このページは、次の URL にあります。
http://www.coins.tsukuba.ac.jp/~yas/coins/os2-2016/2017-02-03
あるいは、次のページから手繰っていくこともできます。
http://www.coins.tsukuba.ac.jp/~yas/
http://www.cs.tsukuba.ac.jp/~yas/
struct timeval { time_t tv_sec; /* seconds. long int */ suseconds_t tv_usec; /* microseconds. long int */ }; int gettimeofday(struct timeval *tp, struct timezone *tzp); int settimeofday(const struct timeval *tp, const struct timezone *tzp);使い方
/*
  gettimeofday-print.c -- get calendar time and print
  Created on: 2014/01/22 20:40:34
*/

#include <sys/time.h>   /* gettimeofday() */
#include <time.h>       /* ctime() */
#include <stdio.h>      /* printf(), perror() */

/* Print the current calendar time in human-readable form. */
int
main( void )
{
    struct timeval tv;
    time_t sec;

    /* Seconds + microseconds since the Epoch; NULL: no timezone wanted. */
    if( gettimeofday( &tv, NULL ) != 0 )
    {
        perror( "gettimeofday" );
        return 1;
    }
    sec = tv.tv_sec;               /* ctime() takes a time_t, not a timeval */
    printf( "%s", ctime( &sec ) ); /* ctime() appends '\n' itself */
    return 0;
}
$ make gettimeofday-print
cc gettimeofday-print.c -o gettimeofday-print
$ ./gettimeofday-print
Wed Feb 1 20:48:35 2017
$ date
Wed Feb 1 20:48:37 JST 2017
$
POSIX 1003.1, 2003 の
struct timespec
では、ナノ秒単位。
struct timespec { time_t tv_sec; /* Seconds. */ long int tv_nsec; /* Nanoseconds. */ }; int clock_settime(clockid_t clock_id, const struct timespec *tp); int clock_gettime(clockid_t clock_id, struct timespec *tp); int clock_getres(clockid_t clock_id, struct timespec *res);clock_id としては、CLOCK_REALTIME (カレンダ時刻)やCLOCK_MONOTONIC があ る。 カレンダ時刻は、変更できる。逆走させることも可能。
順方向のジャンプや逆走を避けて、カレンダ時刻を合わせるには、adjtime() を使う。
int adjtime(const struct timeval *delta, struct timeval *olddelta);
struct itimerval { struct timeval it_interval; /* next value */ struct timeval it_value; /* current value */ }; int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue);
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout); int poll(struct pollfd *fds, nfds_t nfds, int timeout); int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout); int kevent(int kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout);ネットワーク・プログラムでよく使う。複数の入力を監視する。指定された時 間、入力がなければ、システム・コールから復帰する。
なにもしない時間切れ。
unsigned int sleep(unsigned int seconds); int usleep(useconds_t usec) int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
図? タイマ関連のハードウェアの基本モデル
2つの機能がある。
その他の割込み
linux-4.9.1/include/asm-generic/param.h 7: # define HZ CONFIG_HZ /* Internal kernel timer frequency */ linux-4.9.1/include/generated/autoconf.h 613: #define CONFIG_HZ 1000 linux-4.9.1/kernel/time/timer.c 57: __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; linux-4.9.1/include/linux/jiffies.h 77: extern u64 __jiffy_data jiffies_64; 78: extern unsigned long volatile __jiffy_data jiffies;
linux-4.9.1/kernel/time/tick-common.c 79: static void tick_periodic(int cpu) 80: { 81: if (tick_do_timer_cpu == cpu) { ... 87: do_timer(1); ... 89: update_wall_time(); 90: } ... 92: update_process_times(user_mode(get_irq_regs())); ... 94: }
linux-4.9.1/kernel/time/timer.c 57: __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; linux-4.9.1/kernel/time/timekeeping.c 2202: void do_timer(unsigned long ticks) 2203: { 2204: jiffies_64 += ticks; ... 2206: }
xtime_nsec >> shift
でナノ秒を表す。
linux-4.9.1/include/linux/timekeeper_internal.h 30: struct tk_read_base { ... 36: u32 shift; 37: u64 xtime_nsec; ... 39: }; 84: struct timekeeper { 85: struct tk_read_base tkr_mono; ... 87: u64 xtime_sec; ... 128: }; linux-4.9.1/kernel/time/timekeeping.c 75: static inline struct timespec64 tk_xtime(struct timekeeper *tk) 76: { 77: struct timespec64 ts; 78: 79: ts.tv_sec = tk->xtime_sec; 80: ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); 81: return ts; 82: }
linux-4.9.1/kernel/time/time.c 102: SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv, 103: struct timezone __user *, tz) 104: { 105: if (likely(tv != NULL)) { 106: struct timeval ktv; 107: do_gettimeofday(&ktv); 108: if (copy_to_user(tv, &ktv, sizeof(ktv))) 109: return -EFAULT; 110: } 111: if (unlikely(tz != NULL)) { 112: if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) 113: return -EFAULT; 114: } 115: return 0; 116: } linux-4.9.1/kernel/time/timekeeping.c 1149: void do_gettimeofday(struct timeval *tv) 1150: { 1151: struct timespec64 now; 1152: 1153: getnstimeofday64(&now); 1154: tv->tv_sec = now.tv_sec; 1155: tv->tv_usec = now.tv_nsec/1000; 1156: } 684: void getnstimeofday64(struct timespec64 *ts) 685: { 686: WARN_ON(__getnstimeofday64(ts)); 687: } 651: int __getnstimeofday64(struct timespec64 *ts) 652: { 653: struct timekeeper *tk = &tk_core.timekeeper; ... 655: s64 nsecs = 0; ... 660: ts->tv_sec = tk->xtime_sec; 661: nsecs = timekeeping_get_ns(&tk->tkr_mono); ... 665: ts->tv_nsec = 0; 666: timespec64_add_ns(ts, nsecs); ... 674: return 0; 675: }
linux-4.9.1/include/linux/timer.h 12: struct timer_list { ... 18: unsigned long expires; 19: void (*function)(unsigned long); 20: unsigned long data; ... 31: };
jiffies が増加して expires に達すれば、(*function)(data) を呼ぶ。
主に次の関数で操作する。
{ struct timer_list my_timer; // 構造体の宣言 init_timer(&my_timer); // 初期化 my_timer.expires = jiffies + delay; // どのくらい待ちたいか my_timer.data = (unsigned long)data; // 渡したいデータ my_timer.function = my_timer_func; // 関数 add_timer(&my_timer); // 登録 } void my_timer_func(unsigned long data) { ... }
linux-4.9.1/include/linux/hrtimer.h 33: enum hrtimer_mode { 34: HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ 35: HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ ... 39: }; 44: enum hrtimer_restart { 45: HRTIMER_NORESTART, /* Timer is not restarted */ 46: HRTIMER_RESTART, /* Timer must be restarted */ 47: }; 100: struct hrtimer { ... 103: enum hrtimer_restart (*function)(struct hrtimer *); ... 112: };主に次の関数で操作する。
struct hrtimer my_timer; hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); my_timer.function = my_timer_handler; ... hrtimer_start(&my_timer, ktime_set(0, t_nano), HRTIMER_MODE_REL); ... enum hrtimer_restart my_timer_handler(struct hrtimer *timer) { ... return HRTIMER_NORESTART; }
例: Ethernet のドライバでモードを変更して 2 マイクロ秒だけ待つ。
様々な方法がある。
例1: 10 tick (インターバル・タイマによる割り込み)を待つ。
unsigned long timeout = jiffies + 10; // 10 ticks while (time_before(jiffies,timeout)) continue;例2: 2秒待つ
unsigned long delay = jiffies + 2*HZ; // 2秒 while (time_before(jiffies,delay)) continue;
unsigned long timeout = jiffies + 10; // 10 ticks while (jiffies<timeout) continue;引き算して 0 と比較すると、オーバフローの問題が解決できる。
unsigned long timeout = jiffies + 10; // 10 ticks while ((long)(jiffies-timeout)<0) continue;次のマクロを使う方法もある。
linux-4.9.1/include/linux/jiffies.h 102: #define time_after(a,b) \ 103: (typecheck(unsigned long, a) && \ 104: typecheck(unsigned long, b) && \ 105: ((long)((b) - (a)) < 0)) 106: #define time_before(a,b) time_after(b,a) 107: 108: #define time_after_eq(a,b) \ 109: (typecheck(unsigned long, a) && \ 110: typecheck(unsigned long, b) && \ 111: ((long)((a) - (b)) >= 0)) 112: #define time_before_eq(a,b) time_after_eq(b,a)
unsigned long delay = jiffies + 2*HZ; // 2秒 while (time_before(jiffies,delay)) cond_resched();他に実行すべき重要なプロセスが存在する(条件)時には、スケジューラを呼ん で、実行する。存在しなければ、空ループと同じ。ただし、スケジューラを呼 ぶ(sleepする可能性がある)ので、割り込みコンテキストからは使えない。
void ndelay(unsigned long nsecs) void udelay(unsigned long usecs) void mdelay(unsigned long msecs)udelay() は、ある回数のループで実装されている。回数は、CPUの速度等で決 まる。ndelay(), mdelay() は、udelay() を呼んでいる。
udelay() で1ミリ秒以上待ってはいけない。 ループのインデックスがオーバフローする可能性がある。
set_current_state( TASK_INTERRUPTIBLE ); // signal で起きる可能性がある schedule_timeout( s * HZ );実装には struct timer_list が使われている。
表示 | 説明 |
NI | Nice。優先度を表す値。 |
$ /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 poll_s S pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27031 21150 20 0 108132 980 - R+ pts/3 0:00 /bin/ps l
$ /bin/nice /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 poll_s S pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27034 21150 30 10 108136 984 - RN+ pts/3 0:00 /bin/ps l
$ /bin/nice -19 /bin/ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 1013 20638 20636 20 0 123572 2100 wait Ss pts/2 0:00 -bash
0 1013 21139 20638 20 0 155660 5900 - R pts/2 0:02 xterm -class UXTerm -title uxterm -u8
0 1013 21150 21139 20 0 123552 2144 wait Ss pts/3 0:00 bash
0 1013 21560 20638 20 0 267808 22928 poll_s S+ pts/2 0:09 emacs -nw
0 1013 21784 21150 20 0 103748 956 signal T pts/3 0:00 lv kernel/time/timer.c
0 1013 27035 21150 39 19 108132 984 - RN+ pts/3 0:00 /bin/ps l
$
1: /* 2: getpriority-pid.c -- 優先度の表示 3: ~yas/syspro/proc/getpriority-pid.c 4: Created on: 2009/12/14 12:15:11 5: */ 6: 7: #include <stdio.h> /* stderr, fprintf() */ 8: #include <sys/time.h> /* getpriority() */ 9: #include <sys/resource.h> /* getpriority() */ 10: #include <stdlib.h> /* strtol() */ 11: #include <limits.h> /* strtol() */ 12: 13: main( int argc, char *argv[] ) 14: { 15: int which, who, prio; 16: pid_t pid; 17: if( argc != 2 ) 18: { 19: fprintf(stderr,"Usage: %% %s pid\n",argv[0] ); 20: exit( 1 ); 21: } 22: pid = strtol( argv[1], NULL, 10 ); 23: prio = getpriority( PRIO_PROCESS, pid ); 24: printf("pid==%d, priority==%d\n", pid, prio); 25: }
$ ./getpriority-pid
Usage: % ./getpriority-pid pid
$ echo $$
21150
$ ./getpriority-pid
Usage: % ./getpriority-pid pid
$ ./getpriority-pid $$
pid==21150, priority==0
$ ./getpriority-pid 0
pid==0, priority==0
$ /bin/nice -10 ./getpriority-pid 0
pid==0, priority==10
$ /bin/nice -20 ./getpriority-pid 0
pid==0, priority==19
$
linux-4.9.1/include/linux/sched.h 1475: struct task_struct { ... 1483: volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ... 1503: int prio, static_prio, normal_prio; 1504: unsigned int rt_priority; 1505: const struct sched_class *sched_class; 1506: struct sched_entity se; 1507: struct sched_rt_entity rt; ... 1511: struct sched_dl_entity dl; ... 1522: unsigned int policy; ... 1967: }; 1338: struct sched_entity { 1339: struct load_weight load; /* for load-balancing */ 1340: struct rb_node run_node; ... 1342: unsigned int on_rq; ... 1344: u64 exec_start; 1345: u64 sum_exec_runtime; 1346: u64 vruntime; ... 1373: }; 1239: struct load_weight { 1240: unsigned long weight; 1241: u32 inv_weight; 1242: };struct task_struct の中に、prio 等のフィールドやstruct sched_entity が ある。
linux-4.9.1/include/uapi/linux/sched.h 35: #define SCHED_NORMAL 0 36: #define SCHED_FIFO 1 37: #define SCHED_RR 2 38: #define SCHED_BATCH 3 39: /* SCHED_ISO: reserved but not implemented yet */ 40: #define SCHED_IDLE 5 41: #define SCHED_DEADLINE 6
linux-4.9.1/kernel/sys.c 245: SYSCALL_DEFINE2(getpriority, int, which, int, who) 246: { 247: struct task_struct *g, *p; 248: struct user_struct *user; 249: const struct cred *cred = current_cred(); 250: long niceval, retval = -ESRCH; 251: struct pid *pgrp; 252: kuid_t uid; 253: 254: if (which > PRIO_USER || which < PRIO_PROCESS) 255: return -EINVAL; ... 259: switch (which) { 260: case PRIO_PROCESS: 261: if (who) 262: p = find_task_by_vpid(who); 263: else 264: p = current; 265: if (p) { 266: niceval = nice_to_rlimit(task_nice(p)); 267: if (niceval > retval) 268: retval = niceval; 269: } 270: break; 271: case PRIO_PGRP: ... 282: case PRIO_USER: ... 302: } ... 307: return retval; 308: } linux-4.9.1/include/linux/sched/prio.h 4: #define MAX_NICE 19 5: #define MIN_NICE -20 6: #define NICE_WIDTH (MAX_NICE - MIN_NICE + 1) ... 21: #define MAX_USER_RT_PRIO 100 22: #define MAX_RT_PRIO MAX_USER_RT_PRIO 23: 24: #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) 25: #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) ... 32: #define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO) 33: #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) linux-4.9.1/include/linux/sched.h 2593: static inline int task_nice(const struct task_struct *p) 2594: { 2595: return PRIO_TO_NICE((p)->static_prio); 2596: }
glibc-2.12/sysdeps/unix/sysv/linux/getpriority.c 28: #define PZERO 20 ... 35: int 36: getpriority (enum __priority_which which, id_t who) 37: { 38: int res; 39: 40: res = INLINE_SYSCALL (getpriority, 2, (int) which, who); 41: if (res >= 0) 42: res = PZERO - res; 43: return res; 44: }
linux-4.9.1/kernel/sched/core.c 8806: /* 8807: * Nice levels are multiplicative, with a gentle 10% change for every 8808: * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 8809: * nice 1, it will get ~10% less CPU time than another CPU-bound task 8810: * that remained on nice 0. 8811: * 8812: * The "10% effect" is relative and cumulative: from _any_ nice level, 8813: * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 8814: * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 8815: * If a task goes up by ~10% and another task goes down by ~10% then 8816: * the relative distance between them is ~25%.) 8817: */ 8818: const int sched_prio_to_weight[40] = { 8819: /* -20 */ 88761, 71755, 56483, 46273, 36291, 8820: /* -15 */ 29154, 23254, 18705, 14949, 11916, 8821: /* -10 */ 9548, 7620, 6100, 4904, 3906, 8822: /* -5 */ 3121, 2501, 1991, 1586, 1277, 8823: /* 0 */ 1024, 820, 655, 526, 423, 8824: /* 5 */ 335, 272, 215, 172, 137, 8825: /* 10 */ 110, 87, 70, 56, 45, 8826: /* 15 */ 36, 29, 23, 18, 15, 8827: }; 737: static void set_load_weight(struct task_struct *p) 738: { 739: int prio = p->static_prio - MAX_RT_PRIO; 740: struct load_weight *load = &p->se.load; ... 751: load->weight = scale_load(sched_prio_to_weight[prio]); 752: load->inv_weight = sched_prio_to_wmult[prio]; 753: } linux-4.9.1/kernel/sched/sched.h 71: # define scale_load(w) (w)
名前 | 説明 |
---|---|
enqueue_task | プロセスが実行可能(runnable)になった |
dequeue_task | プロセスが実行可能ではなくなった |
yield_task | CPUを譲る。dequeueしてenqueue |
check_preempt_curr | 実行可能になった時にCPUを横取りすべきかをチェック |
pick_next_task | 次に実行すべきプロセスを選ぶ |
set_curr_task | スケジューリング・クラスが変更された |
task_tick | タイマ割込み(tick)の時に呼ばれる |
task_new | 新しいプロセスが生成された |
linux-4.9.1/kernel/sched/core.c 755: static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 756: { 757: update_rq_clock(rq); 758: if (!(flags & ENQUEUE_RESTORE)) 759: sched_info_queued(rq, p); 760: p->sched_class->enqueue_task(rq, p, flags); 761: } 762: 763: static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 764: { 765: update_rq_clock(rq); 766: if (!(flags & DEQUEUE_SAVE)) 767: sched_info_dequeued(rq, p); 768: p->sched_class->dequeue_task(rq, p, flags); 769: }
linux-4.9.1/kernel/sched/core.c 3968: static void __setscheduler(struct rq *rq, struct task_struct *p, 3969: const struct sched_attr *attr, bool keep_boost) 3970: { 3971: __setscheduler_params(p, attr); ... 3980: p->prio = normal_prio(p); ... 3982: if (dl_prio(p->prio)) 3983: p->sched_class = &dl_sched_class; 3984: else if (rt_prio(p->prio)) 3985: p->sched_class = &rt_sched_class; 3986: else 3987: p->sched_class = &fair_sched_class; 3988: } 3942: static void __setscheduler_params(struct task_struct *p, 3943: const struct sched_attr *attr) 3944: { 3945: int policy = attr->sched_policy; 3946: 3947: if (policy == SETPARAM_POLICY) 3948: policy = p->policy; 3949: 3950: p->policy = policy; 3951: 3952: if (dl_policy(policy)) 3953: __setparam_dl(p, attr); 3954: else if (fair_policy(policy)) 3955: p->static_prio = NICE_TO_PRIO(attr->sched_nice); ... 3962: p->rt_priority = attr->sched_priority; 3963: p->normal_prio = normal_prio(p); 3964: set_load_weight(p); 3965: }
p->prio
をpolicy に応じて設定する。
p->prio
の値に応じて
&dl_sched_class
か
&rt_sched_class
か
&fair_sched_class
のいずれかを指すようにする。
Linux CFS は、次の方法でスケジューリングを行なう。
図? runqueueの構造
linux-4.9.1/kernel/sched/sched.h 590: struct rq { ... 619: struct cfs_rq cfs; 620: struct rt_rq rt; 621: struct dl_rq dl; ... 723: }; 375: struct cfs_rq { ... 385: struct rb_root tasks_timeline; 386: struct rb_node *rb_leftmost; ... 452: }; linux-4.9.1/kernel/sched/core.c 95: DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
図? runqueueの構造(red-black tree)
linux-4.9.1/kernel/sched/fair.c 496: static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 497: { 498: struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; 499: struct rb_node *parent = NULL; 500: struct sched_entity *entry; 501: int leftmost = 1; 502: 503: /* 504: * Find the right place in the rbtree: 505: */ 506: while (*link) { 507: parent = *link; 508: entry = rb_entry(parent, struct sched_entity, run_node); 509: /* 510: * We dont care about collisions. Nodes with 511: * the same key stay together. 512: */ 513: if (entity_before(se, entry)) { 514: link = &parent->rb_left; 515: } else { 516: link = &parent->rb_right; 517: leftmost = 0; 518: } 519: } 520: 521: /* 522: * Maintain a cache of leftmost tree entries (it is frequently 523: * used): 524: */ 525: if (leftmost) 526: cfs_rq->rb_leftmost = &se->run_node; 527: 528: rb_link_node(&se->run_node, parent, link); 529: rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); 530: } 455: static inline int entity_before(struct sched_entity *a, 456: struct sched_entity *b) 457: { 458: return (s64)(a->vruntime - b->vruntime) < 0; 459: }
&parent->rb_left
), 大きければ右(&parent->rb_right
) に進む。
cfs_rq->rb_leftmost
にも保存。
linux-4.9.1/kernel/sched/core.c 3075: void scheduler_tick(void) 3076: { 3077: int cpu = smp_processor_id(); 3078: struct rq *rq = cpu_rq(cpu); 3079: struct task_struct *curr = rq->curr; ... 3085: curr->sched_class->task_tick(rq, curr, 0); ... 3097: }
linux-4.9.1/kernel/sched/fair.c 8592: static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 8593: { 8594: struct cfs_rq *cfs_rq; 8595: struct sched_entity *se = &curr->se; 8596: 8597: for_each_sched_entity(se) { 8598: cfs_rq = cfs_rq_of(se); 8599: entity_tick(cfs_rq, se, queued); 8600: } ... 8604: } 3667: static void 3668: entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 3669: { ... 3673: update_curr(cfs_rq); ... 3700: } 798: static void update_curr(struct cfs_rq *cfs_rq) 799: { 800: struct sched_entity *curr = cfs_rq->curr; 801: u64 now = rq_clock_task(rq_of(cfs_rq)); 802: u64 delta_exec; ... 807: delta_exec = now - curr->exec_start; ... 811: curr->exec_start = now; ... 816: curr->sum_exec_runtime += delta_exec; ... 819: curr->vruntime += calc_delta_fair(delta_exec, curr); ... 831: } 606: static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) 607: { 608: if (unlikely(se->load.weight != NICE_0_LOAD)) 609: delta = __calc_delta(delta, NICE_0_LOAD, &se->load); 610: 611: return delta; 612: }
$ cat /proc/sched_debug
Sched Debug Version: v0.09, 2.6.32-431.3.1.el6.x86_64 #1
now at 7955627655.961573 msecs
.jiffies : 12250294951
...
cpu#0, 2100.000 MHz
.nr_running : 1
...
.curr->pid : 30990
...
cfs_rq[0]:/
.exec_clock : 40812852.059736
...
rt_rq[0]:/
.rt_nr_running : 0
...
.nr_running : 1
...
runnable tasks:
task PID tree-key switches prio exec-runtime sum-exec sum-sleep
----------------------------------------------------------------------------------------------------------
R cat 30990 32644150.029656 2 120 32644150.029656 1.072543 0.366310 /
...
cpu#1, 2100.000 MHz
...
cpu#2, 2100.000 MHz
...
cpu#3, 2100.000 MHz
...
$ cat /proc/self/sched
cat (31354, #threads: 1)
---------------------------------------------------------
se.exec_start : 7962193228.073935
se.vruntime : 51856286.476132
se.sum_exec_runtime : 1.211193
...
se.load.weight : 1024
policy : 0
prio : 120
clock-delta : 127
$
void h(int a,int b, int c) { .... }これを実現するために、どのようなコードを書けばよいか。以下の空欄を埋め なさい。
struct timer_list my_timer; int my_arg_a,my_arg_b,my_arg_c; void f(unsigned long data) { init_timer( /*空欄(a)*/ ); my_timer.expires = /*空欄(b)*/; my_timer.data = 0; my_timer.function = /*空欄(c)*/; /*空欄(d)*/; } void my_timer_func(unsigned long data) { h( my_arg_a,my_arg_b,my_arg_c ); }
図? 4つの要素を持つリスト構造
注意: 正しい二分探索木は、複数存在する。