Changeset 16 for trunk/bin/graph
Timestamp: 08/01/15 19:01:31 (9 years ago)
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/bin/graph
r14 r16 36 36 parser.add_argument('db_file', default=None, 37 37 help='') 38 parser.add_argument('unusual_case', nargs='?', type=str, default=None, 39 help='The test case that is most unusual from the others. (default: auto detect)') 38 40 options = parser.parse_args() 39 41 db = nanownlib.storage.db(options.db_file) 42 if options.unusual_case == None: 43 unusual_case,delta = findUnusualTestCase(db) 40 44 41 45 … … 69 73 #print('(test): %f' % weightedMean(derived,weights)) 70 74 71 diffs = list(differences(db, 'long'))72 reported_diffs = list(differences(db, 'long', 'reported'))75 diffs = list(differences(db, unusual_case)) 76 reported_diffs = list(differences(db, unusual_case, 'reported')) 73 77 #shorts = [s['packet_rtt'] for s in samples.values() if s['test_case']=='short'] 74 78 #longs = [s['packet_rtt'] for s in samples.values() if s['test_case']=='long'] … … 76 80 77 81 def basicStatistics(): 78 print('packet_rtt diff mean: %f' % statistics.mean(diffs)) 79 print('packet_rtt diff median: %f' % statistics.median(diffs)) 80 print('packet_rtt diff midhinge: %f' % midsummary(diffs)) 81 print('packet_rtt diff trimean: %f' % trimean(diffs)) 82 print('packet_rtt diff quadsummary: %f' % quadsummary(diffs)) 83 print('packet_rtt diff ubersummary: %f' % ubersummary(diffs)) 84 print('packet_rtt diff septasummary: %f' % septasummary(diffs)) 85 print('packet_rtt diff MAD: %f' % mad(diffs)) 82 print('packet_rtt diff midhinge: %10.2f' % midsummary(diffs)) 83 print('packet_rtt diff quadsummary: %10.2f' % quadsummary(diffs)) 84 print('packet_rtt diff septasummary: %10.2f' % septasummary(diffs)) 85 print('packet_rtt diff MAD: %10.2f' % mad(diffs)) 86 86 try: 87 print('reported diff trimean: %f' % trimean(reported_diffs)) 88 print('reported diff quadsummary: %f' % quadsummary(reported_diffs)) 89 print('reported diff ubersummary: %f' % ubersummary(reported_diffs)) 90 print('reported diff septasummary: %f' % septasummary(reported_diffs)) 91 print('reported diff MAD: %f' % 
mad(reported_diffs)) 87 print('reported diff midhinge: %10.2f' % midsummary(reported_diffs)) 88 print('reported diff quadsummary: %10.2f' % quadsummary(reported_diffs)) 89 print('reported diff septasummary: %10.2f' % septasummary(reported_diffs)) 90 print('reported diff MAD: %10.2f' % mad(reported_diffs)) 92 91 93 92 #import cProfile … … 108 107 #print('tsval null diff weighted mean: %f' % tsvalwmean(db.subseries('train_null','long'))) 109 108 110 109 basicStatistics() 111 110 112 111 def exampleBoxTestHistogram(low,high): 113 112 num_bins = 300 114 all = db.subseries('train', 'long')+db.subseries('test','long')113 all = db.subseries('train',unusual_case)+db.subseries('test',unusual_case) 115 114 s = [s['other_packet'] for s in all] 116 115 l = [s['unusual_packet'] for s in all] … … 362 361 #plt.savefig('paper/graphs/dists-vs-dist-of-diffs2.svg') 363 362 364 #tsFilteredHistogram()363 tsFilteredHistogram() 365 364 366 365 … … 459 458 460 459 chartname = "/home/tim/blindspot/research/timing-analysis/paper/figures/results/%s.svg" % (basename) 461 print(chartname)460 #print(chartname) 462 461 463 462 plt.clf() … … 469 468 color_id = 0 470 469 471 cursor = db.conn.cursor() 472 query = """ 473 SELECT classifier FROM classifier_results GROUP BY classifier ORDER BY classifier; 474 """ 475 cursor.execute(query) 476 classifiers = [] 477 for c in cursor: 478 classifiers.append(c[0]) 479 480 best_obs = [] 481 best_error = [] 470 best_obs,best_error = evaluateTestResults(db) 471 best_obs = sorted(best_obs, key=lambda x: x['num_observations']) 472 best_error = sorted(best_error, key=lambda x: x['error']) 473 winner = None 474 for bo in best_obs: 475 sys.stdout.write("%(num_observations)d obs / %(classifier)s / %(params)s" % bo) 476 if winner == None: 477 sys.stdout.write(" (winner)") 478 winner = bo 479 print() 480 481 for be in best_error: 482 sys.stdout.write("%(error)f%% error / %(classifier)s / %(params)s" % be) 483 if winner == None: 484 sys.stdout.write(" (winner)") 485 
winner = be 486 print() 487 488 all = sorted(best_obs+best_error, key=lambda x: x['classifier']) 482 489 max_obs = 0 483 for classifier in classifiers: 484 query=""" 485 SELECT params,num_observations FROM classifier_results 486 WHERE trial_type='test' 487 AND classifier=:classifier 488 AND (false_positives+false_negatives)/2.0 < 5.0 489 ORDER BY num_observations,(false_positives+false_negatives) 490 LIMIT 1 491 """ 492 cursor.execute(query, {'classifier':classifier}) 493 row = cursor.fetchone() 494 if row == None: 495 query=""" 496 SELECT params,(false_positives+false_negatives)/2 FROM classifier_results 497 WHERE trial_type='test' and classifier=:classifier 498 ORDER BY (false_positives+false_negatives),num_observations 499 LIMIT 1 500 """ 501 cursor.execute(query, {'classifier':classifier}) 502 row = cursor.fetchone() 503 if row == None: 504 sys.stderr.write("WARN: couldn't find test results for classifier '%s'.\n" % classifier) 505 continue 506 507 best_error.append((row[1], classifier)) 508 else: 509 best_obs.append((row[1], classifier)) 510 511 best_params = row[0] 490 for result in all: 512 491 query=""" 513 492 SELECT num_observations,(false_positives+false_negatives)/2.0 FROM classifier_results … … 517 496 ORDER BY num_observations 518 497 """ 519 cursor.execute(query, {'classifier':classifier,'params':best_params}) 498 cursor = db.conn.cursor() 499 cursor.execute(query, {'classifier':result['classifier'],'params':result['params']}) 520 500 521 501 num_obs = [] … … 528 508 path = plt.scatter(num_obs, performance, color=colors[color_id], s=4, alpha=0.8, linewidths=3.0) 529 509 plt.plot(num_obs, performance, color=colors[color_id], alpha=0.8) 530 legend.append(( classifier,path))510 legend.append((result['classifier'],path)) 531 511 color_id = (color_id+1) % len(colors) 532 512 533 best_obs.sort()534 best_error.sort()535 winner = None536 for bo in best_obs:537 sys.stdout.write("%d obs / %s" % bo)538 if winner == None:539 sys.stdout.write(" (winner)")540 
winner = bo541 print()542 543 for be in best_error:544 sys.stdout.write("%f%% error / %s" % be)545 if winner == None:546 sys.stdout.write(" (winner)")547 winner = be548 print()549 550 513 plt.legend([l[1] for l in legend], [l[0] for l in legend], scatterpoints=1, fontsize='x-small') 551 514 plt.plot([0, max_obs], [5.0, 5.0], "k--") 552 515 plt.xlabel('Number of Observations') 553 516 plt.ylabel('Error Rate') 554 plt.savefig(chartname)555 #plt.show()517 #plt.savefig(chartname) 518 plt.show() 556 519 557 520 graphTestResults()
Note: See TracChangeset
for help on using the changeset viewer.