deepspeed_timer.py
'''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import time
import logging
import psutil
import torch
def print_rank_0(message):
    """Print only on global rank 0, or unconditionally if torch.distributed is not initialized."""
    if torch.distributed.is_initialized():
        if torch.distributed.get_rank() == 0:
            print(message, flush=True)
    else:
        print(message, flush=True)
class SynchronizedWallClockTimer:
    """Group of timers. Borrowed from Nvidia Megatron code."""

    class Timer:
        """Timer."""

        def __init__(self, name):
            self.name_ = name
            self.elapsed_ = 0.0
            self.started_ = False
            self.start_time = time.time()

        def start(self):
            """Start the timer."""
            assert not self.started_, 'timer has already been started'
            torch.cuda.synchronize()
            self.start_time = time.time()
            self.started_ = True

        def stop(self):
            """Stop the timer."""
            assert self.started_, 'timer is not started'
            torch.cuda.synchronize()
            self.elapsed_ += (time.time() - self.start_time)
            self.started_ = False

        def reset(self):
            """Reset timer."""
            self.elapsed_ = 0.0
            self.started_ = False

        def elapsed(self, reset=True):
            """Calculate the elapsed time."""
            started_ = self.started_
            # If timing is in progress, end it first.
            if self.started_:
                self.stop()
            # Get the elapsed time.
            elapsed_ = self.elapsed_
            # Reset the elapsed time.
            if reset:
                self.reset()
            # If timing was in progress, restart it.
            if started_:
                self.start()
            return elapsed_

    def __init__(self):
        self.timers = {}

    def __call__(self, name):
        if name not in self.timers:
            self.timers[name] = self.Timer(name)
        return self.timers[name]

    def log(self, names, normalizer=1.0, reset=True):
        """Log a group of timers."""
        assert normalizer > 0.0
        string = 'time (ms)'
        for name in names:
            elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
            string += ' | {}: {:.2f}'.format(name, elapsed_time)
        print_rank_0(string)
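
# A minimal usage sketch (not part of the original file): each named timer
# brackets a region between start() and stop(), and log() prints the
# accumulated wall-clock time in milliseconds from rank 0 only. The timer
# names 'forward' and 'backward' below are illustrative.
#
#     timers = SynchronizedWallClockTimer()
#     timers('forward').start()
#     # ... forward pass ...
#     timers('forward').stop()
#     timers('backward').start()
#     # ... backward pass ...
#     timers('backward').stop()
#     timers.log(['forward', 'backward'])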
class ThroughputTimer:

    def __init__(self,
                 batch_size,
                 num_workers,
                 start_step=2,
                 steps_per_output=50,
                 monitor_memory=True,
                 logging_fn=None):
        self.start_time = 0
        self.end_time = 0
        self.started = False
        self.batch_size = batch_size
        if batch_size is None:
            self.batch_size = 1
        self.num_workers = num_workers
        self.start_step = start_step
        self.epoch_count = 0
        self.local_step_count = 0
        self.total_step_count = 0
        self.total_elapsed_time = 0
        self.steps_per_output = steps_per_output
        self.monitor_memory = monitor_memory
        self.logging = logging_fn
        if self.logging is None:
            self.logging = logging.info
        self.initialized = False

    def update_epoch_count(self):
        self.epoch_count += 1
        self.local_step_count = 0

    def _init_timer(self):
        self.initialized = True

    def start(self):
        self._init_timer()
        self.started = True
        # Skip the first `start_step` steps so that warmup does not skew the average.
        if self.total_step_count >= self.start_step:
            torch.cuda.synchronize()
            self.start_time = time.time()

    def stop(self, report_speed=True):
        if not self.started:
            return
        self.started = False
        self.total_step_count += 1
        self.local_step_count += 1
        if self.total_step_count > self.start_step:
            torch.cuda.synchronize()
            self.end_time = time.time()
            duration = self.end_time - self.start_time
            self.total_elapsed_time += duration
            if self.local_step_count % self.steps_per_output == 0:
                if report_speed:
                    self.logging("{}/{}, SamplesPerSec={}".format(
                        self.epoch_count,
                        self.local_step_count,
                        self.avg_samples_per_sec()))
                if self.monitor_memory:
                    virt_mem = psutil.virtual_memory()
                    swap = psutil.swap_memory()
                    self.logging("{}/{}, vm percent: {}, swap percent: {}".format(
                        self.epoch_count,
                        self.local_step_count,
                        virt_mem.percent,
                        swap.percent))

    def avg_samples_per_sec(self):
        # Only steps after the warmup window contribute; guarding on start_step
        # (rather than 0) avoids a division by zero before any step is timed.
        if self.total_step_count > self.start_step:
            samples_per_step = self.batch_size * self.num_workers
            total_step_offset = self.total_step_count - self.start_step
            avg_time_per_step = self.total_elapsed_time / total_step_offset
            # training samples per second
            return samples_per_step / avg_time_per_step
        return float("-inf")
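
# A minimal usage sketch (not part of the original file; assumes a CUDA
# device, since start() and stop() call torch.cuda.synchronize()). The
# batch_size and num_workers values are illustrative. avg_samples_per_sec()
# averages only over steps after the start_step warmup window.
#
#     tput = ThroughputTimer(batch_size=32, num_workers=4)
#     for _ in range(100):
#         tput.start()
#         # ... run one training step ...
#         tput.stop()
#     print(tput.avg_samples_per_sec())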