Legend:
- "+"  line added
- "-"  line removed
- " "  unmodified context
- Hunk headers ("@@ -N +M @@") give the starting line number in the old and new revision.
trunk/bin/graph
r12 → r13

@@ -81 +81 @@
     diff_overtime.sort()
 
+    print('packet_rtt diff mean: %f' % statistics.mean(diffs))
     print('packet_rtt diff median: %f' % statistics.median(diffs))
     print('packet_rtt diff midhinge: %f' % midsummary(diffs))
@@ -86 +87 @@
     print('packet_rtt diff quadsummary: %f' % quadsummary(diffs))
     print('packet_rtt diff ubersummary: %f' % ubersummary(diffs))
+    print('packet_rtt diff septasummary: %f' % septasummary(diffs))
     print('packet_rtt diff MAD: %f' % mad(diffs))
     try:
@@ -91 +93 @@
         print('reported diff quadsummary: %f' % quadsummary(reported_diffs))
         print('reported diff ubersummary: %f' % ubersummary(reported_diffs))
+        print('reported diff septasummary: %f' % septasummary(reported_diffs))
         print('reported diff MAD: %f' % mad(reported_diffs))
 
-        import cProfile
-        start = time.time()
-        kresults = kfilter({},diffs)
+        #import cProfile
+        #start = time.time()
+        #kresults = kfilter({},diffs)
         #print('packet_rtt diff kfilter: ', numpy.mean(kresults['est']), kresults['var'])
-        print('packet_rtt diff kfilter: ', kresults['est'][-1], kresults['var'][-1])
-        kresults = kfilter({},reported_diffs)
+        #print('packet_rtt diff kfilter: ', kresults['est'][-1], kresults['var'][-1])
+        #kresults = kfilter({},reported_diffs)
         #print('reported diff kfilter: ', numpy.mean(kresults['est']), kresults['var'][-1])
-        print('reported diff kfilter: ', kresults['est'][-1], kresults['var'][-1])
-        print("kfilter time: %f" % (time.time()-start))
+        #print('reported diff kfilter: ', kresults['est'][-1], kresults['var'][-1])
+        #print("kfilter time: %f" % (time.time()-start))
     except:
         pass
@@ -111 +114 @@
 
 
+
+
+def testKalman4D(params=None):
+    from pykalman import KalmanFilter
+    train = db.subseries('train','long', offset=0)
+    test = db.subseries('test','long', offset=0)
+    null = db.subseries('train_null','long', offset=0)
+    measurements = numpy.asarray([(s['unusual_packet'],s['other_packet'],s['unusual_tsval'],s['other_tsval']) for s in (train+test)])
+    null_measurements = numpy.asarray([(s['unusual_packet'],s['other_packet'],s['unusual_tsval'],s['other_tsval']) for s in null])
+
+    if params == None:
+        kf = KalmanFilter(n_dim_obs=4, n_dim_state=4,
+                          initial_state_mean=[quadsummary([s['unusual_packet'] for s in train]),
+                                              quadsummary([s['other_packet'] for s in train]),
+                                              numpy.mean([s['unusual_tsval'] for s in train]),
+                                              numpy.mean([s['other_tsval'] for s in train])])
+        kf = KalmanFilter(n_dim_obs=4, n_dim_state=4)
+
+        start=time.time()
+        kf = kf.em(measurements[0:len(train)]+null_measurements[0:50000], n_iter=10,
+                   em_vars=('transition_matrices',
+                            'observation_matrices',
+                            'transition_offsets',
+                            'observation_offsets',
+                            'transition_covariance',
+                            'observation_covariance',
+                            'initial_state_mean',
+                            'initial_state_covariance'))
+        params = {'transition_matrices': kf.transition_matrices.tolist(),
+                  'observation_matrices': kf.observation_matrices.tolist(),
+                  'transition_offsets': kf.transition_offsets.tolist(),
+                  'observation_offsets': kf.observation_offsets.tolist(),
+                  'transition_covariance': kf.transition_covariance.tolist(),
+                  'observation_covariance': kf.observation_covariance.tolist(),
+                  'initial_state_mean': kf.initial_state_mean.tolist(),
+                  'initial_state_covariance': kf.initial_state_covariance.tolist()}
+        print("Learned Params:\n")
+        import pprint
+        pprint.pprint(params)
+        print("pykalman em time: %f" % (time.time()-start))
+
+    #kf = KalmanFilter(n_dim_obs=2, n_dim_state=2, **params)
+
+    num_obs=5000
+    for offset in range(50000,100000+num_obs,num_obs):
+        start=time.time()
+        m = measurements[offset:offset+num_obs]
+        #params['initial_state_mean']=[quadsummary([s[0] for s in m]),
+        #                              quadsummary([s[1] for s in m]),
+        #                              numpy.mean([s[2] for s in m]),
+        #                              numpy.mean([s[3] for s in m])]
+        kf = KalmanFilter(n_dim_obs=4, n_dim_state=4, **params)
+        (smoothed_state_means, smoothed_state_covariances) = kf.smooth(m)
+        #print("pykalman smooth time: %f" % (time.time()-start))
+        up = numpy.mean([m[0] for m in smoothed_state_means])
+        op = numpy.mean([m[1] for m in smoothed_state_means])
+        #print("packet_rtt pykalman final:", smoothed_state_means[-1][0]-smoothed_state_means[-1][1])
+        print("packet_rtt pykalman mean:", up-op)
+        print("packet_rtt mean:", numpy.mean([s[0]-s[1] for s in m]))
+        #up = numpy.mean([m[2] for m in smoothed_state_means])
+        #op = numpy.mean([m[3] for m in smoothed_state_means])
+        #print("tsval_rtt pykalman final:", smoothed_state_means[-1][2]-smoothed_state_means[-1][3])
+        #print("tsval_rtt pykalman mean:", up-op)
+        #print("tsval_rtt mean:", numpy.mean([s[2]-s[3] for s in m]))
+
+    for offset in range(0,len(null_measurements)+num_obs,num_obs):
+        start=time.time()
+        m = null_measurements[offset:offset+num_obs]
+        #params['initial_state_mean']=[quadsummary([s[0] for s in m]),
+        #                              quadsummary([s[1] for s in m]),
+        #                              numpy.mean([s[2] for s in m]),
+        #                              numpy.mean([s[3] for s in m])]
+        kf = KalmanFilter(n_dim_obs=4, n_dim_state=4, **params)
+        (smoothed_state_means, smoothed_state_covariances) = kf.smooth(m)
+        up = numpy.mean([m[0] for m in smoothed_state_means])
+        op = numpy.mean([m[1] for m in smoothed_state_means])
+        #print("null packet_rtt pykalman final:", smoothed_state_means[-1][0]-smoothed_state_means[-1][1])
+        print("null packet_rtt pykalman mean:", up-op)
+        print("null packet_rtt mean:", numpy.mean([s[0]-s[1] for s in m]))
+        #up = numpy.mean([m[2] for m in smoothed_state_means])
+        #op = numpy.mean([m[3] for m in smoothed_state_means])
+        #print("null tsval_rtt pykalman final:", smoothed_state_means[-1][2]-smoothed_state_means[-1][3])
+        #print("null tsval_rtt pykalman mean:", up-op)
+        #print("null tsval_rtt mean:", numpy.mean([s[2]-s[3] for s in m]))
+
+
+
+echo_vm_5k={'initial_state_covariance': [[33599047.5, -18251285.25, 3242535690.59375, -8560730487.84375],
+                                         [-18251285.25, 9914252.3125, -1761372688.59375, 4650260880.1875],
+                                         [3242535690.59375, -1761372688.59375, 312926663745.03125, -826168494791.7188],
+                                         [-8560730487.84375, 4650260880.1875, -826168494791.7188, 2181195982530.4688]],
+            'initial_state_mean': [12939012.5625, 12934563.71875, 13134751.608, 13138990.9985],
+            'observation_covariance': [[11960180434.411114, 4760272534.795976, 8797551081.431936, 6908794128.927051],
+                                       [4760272534.795962, 12383598172.428213, 5470747537.2599745, 11252625555.297853],
+                                       [8797551081.431955, 5470747537.2601185, 1466222848395.7058, 72565713883.12643],
+                                       [6908794128.927095, 11252625555.297981, 72565713883.12654, 1519760903943.507]],
+            'observation_matrices': [[1.4255288693095167, -0.4254638445329988, 0.0003406844036817347, -0.0005475021956726778],
+                                     [-0.46467270827589857, 1.4654311778340343, -0.0003321330280128265, -0.0002853945703691352],
+                                     [-0.2644570970067974, -0.33955835481495455, 1.7494161615202275, -0.15394117603733548],
+                                     [-0.3419097544041847, -0.23992883666045373, -0.15587790880447727, 1.7292393175137022]],
+            'observation_offsets': [165.2279084503762, 157.76807691937614, 168.4235495099334, 225.33433430227353],
+            'transition_covariance': [[2515479496.145993, -401423541.70620924, 1409951418.1627903, 255932902.74454522],
+                                      [-401423541.706214, 2744353887.676857, 1162316.2019491254, 1857251491.3987627],
+                                      [1409951418.1628358, 1162316.2020361447, 543279068599.8229, -39399311190.5746],
+                                      [255932902.74459982, 1857251491.398838, -39399311190.574585, 537826124257.5266]],
+            'transition_matrices': [[0.52163952865412, 0.47872618354122665, -0.0004322286766109684, 0.00017293351811531466],
+                                    [0.5167436693545113, 0.48319044922845933, 7.765428142114672e-05, -0.00021518950285326355],
+                                    [0.2091705950622469, 0.41051399729482796, 0.19341113299389256, 0.19562916616052917],
+                                    [0.368592004009912, 0.22263632461118732, 0.20756792378812872, 0.20977025833570906]],
+            'transition_offsets': [592.5708159274, 583.3804671015271, 414.4187239098291, 562.166786712371]}
+
+echo_vm_5k={'initial_state_covariance': [[0.375, 0.0, 0.0, 0.0],
+                                         [0.0, 0.375, 0.0, 0.0],
+                                         [0.0, 0.0, 0.375, 0.0],
+                                         [0.0, 0.0, 0.0, 0.375]],
+            'initial_state_mean': [15997944.198361743, 16029825.435899183, 17093077.26228404, 17524263.088803563],
+            'observation_covariance': [[36572556646.179054, 21816054953.37006, 31144379008.310543, 19651005729.823025],
+                                       [21816054953.372543, 440428106325.20325, 41103447776.740585, 427146570672.51227],
+                                       [31144379008.31037, 41103447776.74027, 3280009435458.6953, 458734528073.65686],
+                                       [19651005729.82234, 427146570672.5109, 458734528073.6557, 3769493190697.773]],
+            'observation_matrices': [[1.0248853427592337, -0.031198859962501047, 0.001613706836380402, 0.004720209443291878],
+                                     [-0.8604422900368718, 1.8583369609057172, -0.0022646214457040514, 0.004437933935378169],
+                                     [-0.5814771409524866, 0.22228184387142846, 1.6259599749174072, -0.271594798325566],
+                                     [-0.5862601003257453, 0.2598285939005791, -0.28286590143513024, 1.604087079832425]],
+            'observation_offsets': [1979.4518332096984, 1889.3380163762793, 2132.9112026744906, 1750.7759421584785],
+            'transition_covariance': [[6176492087.271547, 762254719.4171592, 4584288694.652873, 3044796192.4357214],
+                                      [762254719.4185101, 173302376079.4761, 5261303152.757347, 167562483383.9925],
+                                      [4584288694.651718, 5261303152.755746, 1056156956874.4131, -115859156952.07962],
+                                      [3044796192.434162, 167562483383.9901, -115859156952.08018, 1225788436266.3086]],
+            'transition_matrices': [[0.9673912485796876, 0.03252962227543321, 0.0006756067792537124, -0.0006566638567164773],
+                                    [0.9548761966068113, 0.03841774395880293, 0.00426067282319309, 0.002303362691861821],
+                                    [0.6215040230859188, -0.2584476837756142, 0.3176491193420503, 0.3241682768126566],
+                                    [0.6634028281470279, -0.33548335246018723, 0.3298144902195048, 0.3475836278392421]],
+            'transition_offsets': [1751.3049487348183, 1764.989515773476, 1986.8405778425586, 2232.830254345267]}
+#testKalman4D(echo_vm_5k)
+
+
+
+def testKalman(params=None):
+    from pykalman import AdditiveUnscentedKalmanFilter,KalmanFilter
+    train = db.subseries('train','long', offset=0)
+    test = db.subseries('test','long', offset=0)
+    measurements = numpy.asarray([(s['unusual_packet'],s['other_packet']) for s in (train+test)])
+
+    #kf = KalmanFilter(transition_matrices = [[1, 1], [0, 1]], observation_matrices = [[0.1, 0.5], [-0.3, 0.0]])
+    kf = KalmanFilter(n_dim_obs=2, n_dim_state=2,
+                      initial_state_mean=[quadsummary([s['unusual_packet'] for s in train]),
+                                          quadsummary([s['other_packet'] for s in train])])
+    #kf = AdditiveUnscentedKalmanFilter(n_dim_obs=2, n_dim_state=2)
+
+    if params == None:
+        start=time.time()
+        kf = kf.em(measurements[0:len(train)], n_iter=10,
+                   em_vars=('transition_matrices',
+                            'observation_matrices',
+                            'transition_offsets',
+                            'observation_offsets',
+                            'transition_covariance',
+                            'observation_covariance',
+                            'initial_state_covariance'))
+        params = {'transition_matrices': kf.transition_matrices.tolist(),
+                  'observation_matrices': kf.observation_matrices.tolist(),
+                  'transition_offsets': kf.transition_offsets.tolist(),
+                  'observation_offsets': kf.observation_offsets.tolist(),
+                  'transition_covariance': kf.transition_covariance.tolist(),
+                  'observation_covariance': kf.observation_covariance.tolist(),
+                  'initial_state_mean': kf.initial_state_mean.tolist(),
+                  'initial_state_covariance': kf.initial_state_covariance.tolist()}
+        print("Learned Params:\n")
+        import pprint
+        pprint.pprint(params)
+        print("pykalman em time: %f" % (time.time()-start))
+
+    #kf = KalmanFilter(n_dim_obs=2, n_dim_state=2, **params)
+
+    num_obs=10000
+    for offset in range(50000,100000+num_obs,num_obs):
+        start=time.time()
+        kf = KalmanFilter(n_dim_obs=2, n_dim_state=2, **params)
+        m = measurements[offset:offset+num_obs]
+        (smoothed_state_means, smoothed_state_covariances) = kf.smooth(m)
+        print("pykalman smooth time: %f" % (time.time()-start))
+        up = numpy.mean([m[0] for m in smoothed_state_means])
+        op = numpy.mean([m[1] for m in smoothed_state_means])
+        print("packet_rtt pykalman final:", smoothed_state_means[-1][0]-smoothed_state_means[-1][1])
+        print("packet_rtt pykalman mean:", up-op)
+        print("packet_rtt mean:", numpy.mean([s[0]-s[1] for s in m]))
+
+
+five_iter = {'observation_offsets': [-54.53185823, -55.25219184],
+             'observation_covariance': [[ 1.15059170e+10, 4.36743765e+09],
+                                        [ 4.36743765e+09, 1.19410313e+10]],
+             'initial_state_mean': [ 12939012.5625 , 12934563.71875],
+             'transition_covariance': [[ 2.98594543e+09, 6.86355073e+07],
+                                       [ 6.86355073e+07, 3.21368699e+09]],
+             'initial_state_covariance': [[ 2.36836696e+09, 1.63195635e+09],
+                                          [ 1.63195635e+09, 1.12452233e+09]],
+             'transition_offsets': [ 343.69740217, 338.5042467 ],
+             'observation_matrices': [[ 1.42539895, -0.4255261 ],
+                                      [-0.46280375, 1.46295189]],
+             'transition_matrices': [[ 0.56151623, 0.4385931 ],
+                                     [ 0.47309189, 0.52673508]]}
+ten_iter = {'initial_state_covariance': [[229936928.28125, 41172601.0],
+                                         [41172601.0, 7372383.46875]],
+            'initial_state_mean': [12939012.5625, 12934563.71875],
+            'observation_covariance': [[11958914107.88334, 4761048283.066559],
+                                       [4761048283.066557, 12388186543.42032]],
+            'observation_matrices': [[1.4258395826727792, -0.42598392357467674],
+                                     [-0.4647443890462455, 1.4648767294384015]],
+            'observation_offsets': [165.409715349344, 157.96206130876212],
+            'transition_covariance': [[2515594742.7187943, -401728959.41375697],
+                                      [-401728959.41375697, 2743831805.402682]],
+            'transition_matrices': [[0.521306461057975, 0.47879632652984583],
+                                    [0.5167881285851763, 0.483006520280469]],
+            'transition_offsets': [592.4419187566978, 583.2272403965366]}
+#testKalman(ten_iter)
+
+
 def getTCPTSPrecision():
     cursor = db.conn.cursor()
-    query="""SELECT tcpts_mean FROM meta;"""
+    query="""SELECT tcpts_mean FROM meta"""
     cursor.execute(query)
     row = cursor.fetchone()
@@ -170 +517 @@
 #plt.savefig('paper/graphs/dists-vs-dist-of-diffs2.svg')
 
-tsFilteredHistogram()
-sys.exit(0)
-
-
-from pykalman import KalmanFilter
-#kf = KalmanFilter(transition_matrices = [[1, 1], [0, 1]], observation_matrices = [[0.1, 0.5], [-0.3, 0.0]])
-kf = KalmanFilter(transition_matrices = [[1, 0], [0, 1]], n_dim_obs=2, observation_matrices = [[1.0, 0], [0, 1.0]])
-
-#delta = 1e-5
-#trans_cov = delta / (1 - delta) * np.eye(2)
-
-#kf = KalmanFilter(n_dim_obs=2, n_dim_state=2,
-#                  initial_state_mean=np.zeros(2),
-#                  initial_state_covariance=np.ones((2, 2)),
-#                  transition_matrices=np.eye(2),
-#                  observation_matrices=obs_mat,
-#                  observation_covariance=1.0,
-#                  transition_covariance=trans_cov)
-
-
-#measurements = numpy.asarray([[1,0], [0,0], [0,1]]) # 3 observations
-measurements = numpy.asarray([(s['unusual_packet'],s['other_packet']) for s in (db.subseries('train','long')+db.subseries('test','long'))])
-kf = kf.em(measurements, n_iter=5)
-#(filtered_state_means, filtered_state_covariances) = kf.filter(measurements)
-#print("packet_rtt pykalman:", filtered_state_means[-1][0]-filtered_state_means[-1][1])
-#print("packet_rtt pykalman:", filtered_state_means[-1])
-
-(smoothed_state_means, smoothed_state_covariances) = kf.smooth(measurements)
-#up = numpy.mean([m[0] for m in smoothed_state_means])
-#op = numpy.mean([m[1] for m in smoothed_state_means])
-print("packet_rtt pykalman:", smoothed_state_means[-1], smoothed_state_means[-1][0]-smoothed_state_means[-1][1])
-#print("packet_rtt pykalman:", up, op, up-op)
+#tsFilteredHistogram()
+
+
+
 
 
@@ -267 +586 @@
     classifiers.append(c[0])
 
+max_obs = 0
 for classifier in classifiers:
     query="""
@@ -304 +624 @@
     performance = []
     for row in cursor:
+        max_obs = max(max_obs, row[0])
         num_obs.append(row[0])
         performance.append(row[1])
@@ -313 +634 @@
 
 plt.legend([l[1] for l in legend], [l[0] for l in legend], scatterpoints=1, fontsize='xx-small')
+plt.plot([0, max_obs], [5.0, 5.0], "k--")
 plt.show()
 
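The testKalman4D()/testKalman() helpers added above follow pykalman's usual fit-then-smooth pattern. The stand-alone sketch below is illustrative only and is not part of the changeset; the synthetic two-channel series stands in for the db.subseries() RTT measurements, and the constants are made up. It shows the same steps: learn the linear-Gaussian model with KalmanFilter.em() on a training slice, then smooth a later window and compare the mean gap between the two smoothed state components against the raw measurement gap.

import numpy
from pykalman import KalmanFilter

numpy.random.seed(0)
# Two noisy observation channels separated by a constant ~50 tick gap
# (a stand-in for the unusual_packet/other_packet RTT series).
n = 2000
gap = 50.0
measurements = numpy.column_stack([
    10000.0 + gap + numpy.random.normal(0, 200, n),
    10000.0 + numpy.random.normal(0, 200, n),
])

kf = KalmanFilter(n_dim_obs=2, n_dim_state=2)
# Learn the model parameters from the first half of the series ("train").
kf = kf.em(measurements[:n//2], n_iter=5,
           em_vars=('transition_matrices', 'observation_matrices',
                    'transition_offsets', 'observation_offsets',
                    'transition_covariance', 'observation_covariance',
                    'initial_state_covariance'))

# Smooth an unseen window and compare the recovered state-mean gap
# against the raw measurement gap, as the new helpers do per window.
window = measurements[n//2:n//2 + 500]
smoothed_means, smoothed_covs = kf.smooth(window)
print("pykalman mean gap:", numpy.mean(smoothed_means[:, 0] - smoothed_means[:, 1]))
print("raw mean gap:     ", numpy.mean(window[:, 0] - window[:, 1]))

Note that em() only re-estimates the parameters listed in em_vars; that is why the committed testKalman() pins initial_state_mean with quadsummary() in the constructor and leaves it out of em_vars, while testKalman4D() lets EM estimate it as well.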
trunk/bin/train
r11 → r13

@@ -55 +55 @@
     trainer = classifiers[classifier]['train']
     threshold = 5.0 # in percent
-    num_obs = 1000
+    num_obs = 7
     max_obs = int(db.populationSize('train')/5)
     result = None
@@ -75 +75 @@
         classifiers[classifier]['train_results'].append(result)
 
-        if error < threshold:
+        if error < threshold and num_obs > 100:
             break
 
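The r13 change to bin/train starts the sample-size search much smaller (7 observations instead of 1000) and no longer accepts an early stop based on a tiny sample. The following rough sketch of that control flow is an assumption for illustration only: the trainer() callable, its return value, and the doubling schedule are hypothetical and not the repository's code; only the starting value, the threshold test, and the num_obs > 100 guard come from the diff.

def train_until_accurate(trainer, population_size, threshold=5.0):
    """Sketch (hypothetical) of the per-classifier loop bin/train drives."""
    num_obs = 7                                   # r13: start tiny (was 1000 in r11)
    max_obs = int(population_size / 5)
    result = None
    while num_obs <= max_obs:
        result = trainer(num_obs)                 # hypothetical: returns {'error': percent, ...}
        error = result['error']
        # r13: require both an acceptable error rate and a non-trivial sample size
        if error < threshold and num_obs > 100:
            break
        num_obs *= 2                              # hypothetical growth schedule
    return result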