-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathread_data.py
399 lines (325 loc) · 14.3 KB
/
read_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
''' Preliminary functions. These will get called by the main functions.'''
import config
def csv_to_dict(filename):
    """Read one of the project's .csv files and return its rows.

    Use only for filename = 'links' or filename = 'nodes'. Each line of
    the file becomes one dict, keyed by the header (first) line.
    """
    from csv import DictReader
    with open(filenames[filename], 'rb') as readfile:
        reader = DictReader(readfile)
        return [row for row in reader]
def link_id_info(links_dict, link_id):
    """Return the row of links_dict whose 'link_id' equals str(link_id).

    links_dict should be the output of csv_to_dict('links'). Returns
    None when no row matches (same as the original linear scan).
    """
    target = str(link_id)
    return next((row for row in links_dict if row['link_id'] == target), None)
def nodes_to_link_id(links_dict):
    """Invert links_dict: map 'begin_node_id,end_node_id' -> link_id.

    Both key and value are strings, exactly as read from the csv.
    """
    return {row['begin_node_id'] + ',' + row['end_node_id']: row['link_id']
            for row in links_dict}
def extract_data_from_line(line):
    """Unpack one DictReader row of the raw data file.

    Returns (hour-of-year as int, 'begin,end' node-pair string,
    num_trips string, travel_time string). Assumes a non-leap year.
    """
    # Cumulative day counts before the first of each month (non-leap year).
    days_before_month = {'01': 0, '02': 31, '03': 59, '04': 90, '05': 120,
                         '06': 151, '07': 181, '08': 212, '09': 243,
                         '10': 273, '11': 304, '12': 334}
    date_part, clock_part = line['datetime'].split()
    month, day = date_part.split('-')[1:3]
    hour_of_day = int(clock_part[:2])
    hour_of_year = (days_before_month[month] * 24
                    + (int(day) - 1) * 24
                    + hour_of_day)
    node_pair = line['begin_node_id'] + ',' + line['end_node_id']
    return hour_of_year, node_pair, line['num_trips'], line['travel_time']
# Read raw data file and return rows, columns, entries as lists.
# rows is a list of ints.
# columns_offset_by_one is a list of strings.
# entries is a list of strings.
def read_data_csv():
    """Read the raw trip data and return four parallel lists.

    Returns (rows, columns_offset_by_one, trips, travel_times):
    rows are int hours-of-year, columns_offset_by_one are link_id
    strings, trips and travel_times are value strings. Rows whose node
    pair is the placeholder '0,0' are dropped.

    Fixes vs. original: it appended to (and returned) the undefined
    name 'traveltimes' instead of the 'travel_times' list it had
    initialized, raising NameError on the first kept row; it also
    carried two dead 'key = ...' assignments that were never used.
    """
    from csv import DictReader
    with open(filenames['raw_data'], 'rb') as csvfile:
        reader = DictReader(csvfile)
        links_dict = nodes_to_link_id(csv_to_dict('links'))
        times, nodes, entries1, entries2 =\
            map(list, zip(*[extract_data_from_line(line) for line in reader]))
    rows = []
    columns_offset_by_one = []
    trips = []
    travel_times = []
    for i in range(len(times)):
        # '0,0' marks a missing/placeholder node pair; skip those rows.
        if nodes[i] != '0,0':
            rows.append(times[i])
            columns_offset_by_one.append(links_dict[nodes[i]])
            trips.append(entries1[i])
            travel_times.append(entries2[i])
    return rows, columns_offset_by_one, trips, travel_times
def write_data_coo(rows, columns_offset_by_one, trips, traveltimes):
    """Dump the four parallel COO-form lists to the data_coo_form file.

    Writes one 'name=comma,separated,values' line per list; rows are
    stringified, the other three lists are already strings.
    """
    with open(filenames['data_coo_form'], 'wb') as writefile:
        writefile.write('rows=' + ','.join(map(str, rows)) + '\n')
        writefile.write('columns_offset_by_one='
                        + ','.join(columns_offset_by_one) + '\n')
        writefile.write('trips=' + ','.join(trips) + '\n')
        writefile.write('travel_times=' + ','.join(traveltimes) + '\n')
    return None
## Link_ids range from 1 to 260855. But we also wrote blank-data for link_id 260856.
## Use this last link_id for debugging.
def write_data_array():
    # Re-read the COO-form dump written by write_data_coo() and write two
    # dense CSV files (data_trips, data_traveltimes): one line per hour,
    # one column per link_id (TOTAL_LINKS + 1 columns, '' where no
    # observation exists).
    # NOTE(review): relies on Python 2 semantics — map() returning a list
    # (so .reverse() works below) and 'rb'/'wb' modes for csv-style text.
    from time import time
    from numpy import array
    time0 = time()
    with open(filenames['data_coo_form'],'rb') as readfile:
        # The file has exactly four lines: rows=..., columns_offset_by_one=...,
        # trips=..., travel_times=... (written by write_data_coo).
        rows, columns_offset_by_one, trips, traveltimes = readfile.readlines()
    # Strip the 'rows=' prefix (5 characters) before parsing.
    rows = map(int, rows[5:].split(','))
    print('Rows converted to ints,', time()-time0)
    # columns_offset_by_one start at 1 because they are link_ids.
    # columns start at 0.
    # Strip the 'columns_offset_by_one=' prefix (22 characters).
    columns_offset_by_one = map(int, columns_offset_by_one[22:].split(','))
    print('Columns converted to ints,', time()-time0)
    # Strip the 'trips=' prefix (6 characters).
    trips = trips[6:].rstrip().split(',')
    print('Trips were read,', time()-time0)
    # Strip the 'travel_times=' prefix (13 characters).
    traveltimes = traveltimes[13:].rstrip().split(',')
    print('Travel times were read,', time()-time0)
    # We reverse all the arrays so that we can write data to the new csv file
    # starting at hour 0 instead of 8759.
    rows.reverse()
    columns_offset_by_one.reverse()
    trips.reverse()
    traveltimes.reverse()
    # NOTE(review): this rebinds the local name 'time' (previously the
    # imported function) to the current hour; time() is not called again
    # below, so the shadowing is harmless here, but it is fragile.
    time = rows[0]
    line_trips = ['' for j in range(TOTAL_LINKS + 1)]
    line_traveltimes = ['' for j in range(TOTAL_LINKS + 1)]
    with open(filenames['data_trips'],'wb') as writefile_trips,\
         open(filenames['data_traveltimes'],'wb') as writefile_traveltimes:
        for i in range(len(rows)):
            if time == rows[i]:
                # Same hour: keep filling the current line. Columns are
                # 1-based link_ids, hence the -1 for the list position.
                line_trips[columns_offset_by_one[i]-1] = trips[i]
                line_traveltimes[columns_offset_by_one[i]-1] = traveltimes[i]
            else:
                # Hour advanced: flush the finished lines and start fresh.
                # NOTE(review): assumes hours are consecutive (time += 1);
                # a gap of more than one hour would misalign output rows —
                # confirm against the data.
                time += 1
                writefile_trips.write(','.join(line_trips)+'\n')
                writefile_traveltimes.write(','.join(line_traveltimes)+'\n')
                line_trips = ['' for j in range(TOTAL_LINKS + 1)]
                line_traveltimes = ['' for j in range(TOTAL_LINKS + 1)]
                line_trips[columns_offset_by_one[i]-1] = trips[i]
                line_traveltimes[columns_offset_by_one[i]-1] = traveltimes[i]
        # Flush the last (possibly partially filled) hour.
        writefile_trips.write(','.join(line_trips)+'\n')
        writefile_traveltimes.write(','.join(line_traveltimes)+'\n')
    return None
def find_full_links():
    """Find the "full" links: at most 30*24 missing hourly entries.

    Scans filenames['data_trips'] line by line, prunes any link_id once
    its count of blank entries exceeds 30*24, saves the survivors (one
    int per line) to filenames['full_link_ids'] and returns them as a
    list.

    Fixes vs. original: the csv file handle was never closed (now a
    with-block), the imported csv.reader function was shadowed by its
    own return value, and defaulters is now a set so the per-line
    pruning filter does O(1) membership tests instead of O(len) scans.
    """
    from csv import reader as csvreader
    from numpy import array, savetxt
    # 'rb' kept for the Python-2 csv module used throughout this file.
    with open(filenames['data_trips'], 'rb') as readfile:
        reader = csvreader(readfile)
        print('data_trips.csv is opened')
        potential_full_links = range(1, TOTAL_LINKS + 1+1)
        missing_entries = [0 for i in potential_full_links]
        for line in reader:
            defaulters = set()
            for link_id in potential_full_links:
                # An empty string means no observation for this link/hour.
                missing_entries[link_id-1] += 1 - bool(line[link_id-1])
                if missing_entries[link_id-1] > 30*24:
                    defaulters.add(link_id)
            potential_full_links = [i for i in potential_full_links
                                    if i not in defaulters]
            print(reader.line_num, len(potential_full_links))
    savetxt(filenames['full_link_ids'], array(potential_full_links),
            fmt='%d')
    return potential_full_links
def find_empty_links():
    """Find the "empty" links: at most 30*24 present hourly entries.

    Mirror image of find_full_links: scans filenames['data_trips'],
    prunes any link_id once its count of NON-blank entries exceeds
    30*24, saves the survivors (one int per line) to
    filenames['empty_link_ids'] and returns them as a list.

    Fixes vs. original: the csv file handle was never closed (now a
    with-block), the imported csv.reader function was shadowed by its
    own return value, and defaulters is now a set so the per-line
    pruning filter does O(1) membership tests instead of O(len) scans.
    """
    from csv import reader as csvreader
    from numpy import array, savetxt
    # 'rb' kept for the Python-2 csv module used throughout this file.
    with open(filenames['data_trips'], 'rb') as readfile:
        reader = csvreader(readfile)
        print('data_trips.csv is opened')
        potential_empty_links = range(1, TOTAL_LINKS + 1+1)
        entries = [0 for i in potential_empty_links]
        for line in reader:
            defaulters = set()
            for link_id in potential_empty_links:
                # A non-empty string means this link has data this hour.
                entries[link_id-1] += bool(line[link_id-1])
                if entries[link_id-1] > 30*24:
                    defaulters.add(link_id)
            potential_empty_links = [i for i in potential_empty_links
                                     if i not in defaulters]
            print(reader.line_num, len(potential_empty_links))
    savetxt(filenames['empty_link_ids'], array(potential_empty_links),
            fmt='%d')
    return potential_empty_links
def write_full_link_data():
    """Extract the full-link columns and dump them as JSON.

    For the link_ids listed in filenames['full_link_ids'], pulls their
    columns out of the dense data_trips and data_traveltimes CSVs
    (blank entries become NaN) and json-dumps each as a list of
    per-hour rows to full_link_trips / full_link_traveltimes.

    Fixes vs. original: the output files were opened via
    dump(V, open(..., 'wb')) and never closed (handle leak); the
    identical trips/traveltimes loops are factored into one helper.
    """
    from numpy import loadtxt
    full_link_ids = loadtxt(filenames['full_link_ids'], dtype='int')
    _dump_full_link_columns(full_link_ids, filenames['data_trips'],
                            filenames['full_link_trips'])
    _dump_full_link_columns(full_link_ids, filenames['data_traveltimes'],
                            filenames['full_link_traveltimes'])
    return None
def _dump_full_link_columns(full_link_ids, source_csv, target_json):
    # Read source_csv row by row, keep only the columns for full_link_ids
    # (NaN where blank), and json-dump the resulting list of rows.
    from csv import reader as csvreader
    from json import dump
    from numpy import nan
    V = []
    with open(source_csv, 'rb') as readfile:
        reader = csvreader(readfile)
        for line in reader:
            V.append(map(float, [line[i-1] if bool(line[i-1]) else nan
                                 for i in full_link_ids]))
            print(reader.line_num)
    # with-block so the output handle is closed deterministically.
    with open(target_json, 'wb') as writefile:
        dump(V, writefile)
def read_full_link_json(filenames):
    """Load the saved per-hour array for the full links only.

    Selects the source file by config.TRIPS: 0 -> full_link_speeds,
    1 -> full_link_trips; any other value prints an error and returns
    None. Returns (list of full link_ids, numpy array of shape
    (config.HOURS_IN_YEAR, config.FULL_LINKS)).

    Fix vs. original: json.load(open(filename, 'rb')) leaked the file
    handle; it is now closed via a with-block.
    """
    import json
    import numpy as np
    full_link_ids = np.loadtxt(filenames['full_link_ids'], dtype='int')
    if config.TRIPS == 0:
        filename = filenames['full_link_speeds']
        print('Speeds')
    elif config.TRIPS == 1:
        filename = filenames['full_link_trips']
        print('Trips')
    else:
        print('Error: invalid argument')
        return None
    with open(filename, 'rb') as readfile:
        V = json.load(readfile)
    # Sanity checks against the dimensions recorded in config.
    assert len(full_link_ids) == config.FULL_LINKS
    assert len(V) == config.HOURS_IN_YEAR
    assert len(V[0]) == config.FULL_LINKS
    return list(full_link_ids), np.array(V)
from numpy import nan
def replace_placeholder(data, placeholder = nan, value = 0.):
    """Return a copy of *data* with every *placeholder* replaced by *value*.

    NaN placeholders are matched with isnan (since nan != nan); any
    other placeholder is matched by equality. Shape is preserved.

    Improvement vs. original: the Python-level loop over data.flatten()
    is replaced by a vectorized numpy.where, which does the same
    replacement in a single C-level pass.
    """
    from numpy import isnan, where
    if isnan(placeholder):
        mask = isnan(data)
    else:
        mask = (data == placeholder)
    # NOTE(review): like the original rebuild-via-array, the result is a
    # new float array for the float data this file uses.
    return where(mask, value, data)
def autocorrelation(data):
    """Plot mean autocorrelation of *data* against assumed period in days.

    data is an (hours x links) array; periods 1..38 days are tried and
    a peak in the plot indicates a strong periodicity at that period.

    Fix vs. original: the y-axis label misspelled 'autocorrelation' as
    'autocorellation'.
    """
    from matplotlib.pyplot import plot, xlabel, ylabel, show
    from numpy import nanmean, nanvar, mean, multiply, arange
    # We choose 38 days as the max possible periodicity in traffic.
    START_PERIOD = 1
    END_PERIOD = 38
    # Fill missing entries with the overall mean so the products below
    # are always defined.
    V = replace_placeholder(data, value = nanmean(data))
    # We don't take the variance of entries that we replaced with nanmean.
    sigma2 = nanvar(data)
    autocorr_dict = {period:0 for period in range(START_PERIOD,END_PERIOD+1)}
    # Deviations of each entry from its column (per-link) mean.
    Deviations = V - nanmean(V, axis=0)
    for period in range(START_PERIOD, END_PERIOD+1):
        # Convert period in days to period in hours as 24*period.
        autocorr = nanmean([multiply(Deviations[t],Deviations[t+24*period])
                            for t in range(len(V)-24*period)])/sigma2
        autocorr_dict[period] = autocorr
        print(period)
    # Peaks in plot correspond to high autocorrelation i.e. high
    # periodicity trend.
    plot(arange(START_PERIOD-0.5, END_PERIOD-0.5+1),
         [autocorr_dict[period] for period in range(START_PERIOD, END_PERIOD+1)],
         'o-')
    ylabel('Average autocorrelation over full links')
    xlabel('Assumed period of data (in days)')
    show()
    return None
def autocorrelation_hourly(data):
    """Plot mean autocorrelation of *data* against assumed period in hours.

    data is an (hours x links) array; periods of 7 days plus/minus 6
    hours are tried, to locate the weekly periodicity precisely.

    Fix vs. original: the y-axis label misspelled 'autocorrelation' as
    'autocorellation'.
    """
    from matplotlib.pyplot import plot, xlabel, ylabel, show
    from numpy import nanmean, nanvar, mean, multiply, arange
    # We choose 7 days and plus-minus 6 hours as the possible periodicity
    # in traffic.
    START_PERIOD = 7*24 - 6
    END_PERIOD = 7*24 + 6
    # Fill missing entries with the overall mean so the products below
    # are always defined.
    V = replace_placeholder(data, value = nanmean(data))
    # We don't take the variance of entries that we replaced with nanmean.
    sigma2 = nanvar(data)
    autocorr_dict = {period:0 for period in range(START_PERIOD,END_PERIOD+1)}
    # Deviations of each entry from its column (per-link) mean.
    Deviations = V - nanmean(V, axis=0)
    for period in range(START_PERIOD, END_PERIOD+1):
        autocorr = nanmean([multiply(Deviations[t],Deviations[t+period])
                            for t in range(len(V)-period)])/sigma2
        autocorr_dict[period] = autocorr
        print(period)
    # Peaks in plot correspond to high autocorrelation i.e. high
    # periodicity trend.
    plot(arange(START_PERIOD, END_PERIOD+1),
         [autocorr_dict[period] for period in range(START_PERIOD, END_PERIOD+1)],
         'o-')
    ylabel('Average autocorrelation over full links')
    xlabel('Assumed period of data (in hours)')
    show()
    return None
def find_Phase2_links():
    # Return the link_ids that are NOT in empty_link_ids, by walking the
    # full id range and the sorted empty ids in lockstep.
    # NOTE(review): relies on empty_link_ids being sorted ascending (it is
    # written in id order by find_empty_links) and on the module-level
    # constants TOTAL_LINKS and EMPTY_LINKS — confirm both before reuse.
    from numpy import loadtxt
    empty_link_ids = loadtxt(filenames['empty_link_ids'], dtype=int)
    assert len(empty_link_ids) == EMPTY_LINKS + 1
    Phase2_links = []
    j = 0
    for i in range(1, TOTAL_LINKS+1+1):
        if i < empty_link_ids[j]:
            # i is before the next empty id -> it has data; keep it.
            Phase2_links.append(i)
            print(i)
        elif i == empty_link_ids[j]:
            # i is an empty link -> skip it and advance to the next one.
            j +=1
        else:
            # Unreachable if empty_link_ids is sorted and complete.
            print('Error!')
    # We don't have to worry about tail-end because last empty_link and
    # last total_link are the same (i.e. 260856th)
    assert len(Phase2_links) == TOTAL_LINKS+1 - (EMPTY_LINKS+1)
    return Phase2_links
## Link_ids range from 1 to 260855. But we also wrote blank-data for link_id 260856.
## Use this last link_id for debugging.
def write_data_array_transpose():
    # Like write_data_array, but transposed: one output line per link_id,
    # one column per hour (HOURS_IN_YEAR columns). Only trips are written.
    # NOTE(review): Python-2 only — map() must return a list and files are
    # opened in binary mode for csv-style text.
    from numpy import array, loadtxt
    with open(filenames['data_coo_form'],'rb') as readfile:
        # Four lines: rows=..., columns_offset_by_one=..., trips=...,
        # travel_times=... (travel times are read but unused here).
        hours, link_ids, trips, traveltimes = readfile.readlines()
    # Strip the 'name=' prefixes (5/22/6 characters) before parsing.
    hours = map(int, hours[5:].strip().split(','))
    link_ids = map(int, link_ids[22:].strip().split(',')) # this list includes duplicates
    trips = trips[6:].strip().split(',')
    # Sort all three lists in lockstep by link_id so that each link's
    # observations become contiguous.
    hours, link_ids, trips = zip(*[(h,l,t) for (h,l,t)
                                   in sorted(zip(hours,link_ids,trips),
                                             key=lambda pair:pair[1])])
    assert link_ids[0] == 1
    link = 1
    line_trips = ['' for j in range(HOURS_IN_YEAR)]
    # Truncate/create the output file; lines are appended per link below.
    file_create = open(filenames['data_trips_transpose'],'wb')
    file_create.close()
    for i in range(len(link_ids)):
        if link == link_ids[i]:
            # Still on the same link: fill in this hour's value.
            line_trips[hours[i]] = trips[i]
        else:
            # Moved on to a new link: flush the finished line, writing one
            # blank line for every intervening link_id with no data at all.
            while link < link_ids[i]:
                link += 1
                with open(filenames['data_trips_transpose'],'ab') as writefile_trips:
                    writefile_trips.write(','.join(line_trips)+'\n')
                line_trips = ['' for j in range(HOURS_IN_YEAR)]
            line_trips[hours[i]] = trips[i]
    # Flush the last link that actually had data.
    with open(filenames['data_trips_transpose'],'ab') as writefile_trips:
        writefile_trips.write(','.join(line_trips)+'\n')
    line_trips = ['' for j in range(HOURS_IN_YEAR)]
    # Pad with blank lines, ending with a final line without a trailing
    # newline.
    # NOTE(review): the bound is TOTAL_LINKS-1 here but TOTAL_LINKS+1 in
    # the non-transposed writer — confirm the intended line count.
    while link<TOTAL_LINKS-1:
        link += 1
        with open(filenames['data_trips_transpose'],'ab') as writefile_trips:
            writefile_trips.write(','.join(line_trips)+'\n')
    with open(filenames['data_trips_transpose'],'ab') as writefile_trips:
        writefile_trips.write(','.join(line_trips))
    return None
#write_data_array_transpose()
#empty_links = find_empty_links()
#print len(empty_links)
'''
from matplotlib.pyplot import plot, show
full_link_ids, V = read_full_link_json(trips=TRIPS)
print type(full_link_ids[0])
print full_link_ids.index(169017)
[plot(range(24), V[i*24:(i+1)*24,1]) for i in range(7)]
show()
'''