# source: trunk/lib/nanownlib/train.py @ 12

import time
import statistics
import functools
import pprint
import json

from .stats import *
from .parallel import WorkerThreads
def trainBoxTest(db, unusual_case, greater, num_observations):
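    """Grid-search the (low, high) window parameters for the box test.

    Four stages: a coarse scan of candidate lows at width 1.0, a width
    scan over the five best lows, a re-evaluation of those lows at the
    chosen width, and a final fine-tuning of the width.  Error rates
    are estimated by bootstrap resampling of the 'train' and
    'train_null' series.
    """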
    db.resetOffsets()

    def trainAux(low,high,num_trials):
        estimator = functools.partial(multiBoxTest, {'low':low, 'high':high}, greater)
        estimates = bootstrap3(estimator, db, 'train', unusual_case, num_observations, num_trials)
        null_estimates = bootstrap3(estimator, db, 'train_null', unusual_case, num_observations, num_trials)

        bad_estimates = len([e for e in estimates if e != 1])
        bad_null_estimates = len([e for e in null_estimates if e != 0])

        false_negatives = 100.0*bad_estimates/num_trials
        false_positives = 100.0*bad_null_estimates/num_trials
        return false_positives,false_negatives
    #start = time.time()
    wt = WorkerThreads(2, trainAux)

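    # Stage 1: coarse scan of candidate low values at a fixed width of 1.0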
    num_trials = 200
    width = 1.0
    performance = []
    for low in range(0,50):
        wt.addJob(low, (low,low+width,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        performance.append(((fp+fn)/2.0, job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    #print(time.time()-start)

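    # Stage 2: for the five best lows, scan candidate widths and keep the
    # width that best balances false positives against false negatives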
    num_trials = 200
    lows = [p[1] for p in performance[0:5]]
    widths = [w/10.0 for w in range(5,65,5)]
    performance = []
    for width in widths:
        false_positives = []
        false_negatives = []
        for low in lows:
            wt.addJob(low,(low,low+width,num_trials))
        wt.wait()
        while not wt.resultq.empty():
            job_id,errors = wt.resultq.get()
            fp,fn = errors
            false_negatives.append(fn)
            false_positives.append(fp)

        #print(width, false_negatives)
        #print(width, false_positives)
        #performance.append(((statistics.mean(false_positives)+statistics.mean(false_negatives))/2.0,
        #                    width, statistics.mean(false_negatives), statistics.mean(false_positives)))
        performance.append((abs(statistics.mean(false_positives)-statistics.mean(false_negatives)),
                            width, statistics.mean(false_negatives), statistics.mean(false_positives)))
    performance.sort()
    #pprint.pprint(performance)
    good_width = performance[0][1]
    #print("good_width:",good_width)

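    # Stage 3: re-evaluate the candidate lows at the chosen width with
    # more trials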
    num_trials = 500
    performance = []
    for low in lows:
        wt.addJob(low, (low,low+good_width,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        performance.append(((fp+fn)/2.0, job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_low = performance[0][1]
    #print("best_low:", best_low)

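    # Stage 4: fine-tune the width around good_width, minimizing |FP - FN|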
    num_trials = 500
    widths = [good_width+(x/100.0) for x in range(-70,75,5) if good_width+(x/100.0) > 0.0]
    performance = []
    for width in widths:
        wt.addJob(width, (best_low,best_low+width,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        #performance.append(((fp+fn)/2.0, job_id, fn, fp))
        performance.append((abs(fp-fn), job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_width = performance[0][1]
    #print("best_width:",best_width)
    #print("final_performance:", performance[0][0])

    wt.stop()
    params = json.dumps({"low":best_low,"high":best_low+best_width}, sort_keys=True)
    return {'trial_type':"train",
            'num_observations':num_observations,
            'num_trials':num_trials,
            'params':params,
            'false_positives':performance[0][3],
            'false_negatives':performance[0][2]}


def trainSummary(summaryFunc, db, unusual_case, greater, num_observations):
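    """Tune the distance and threshold parameters of a summary-statistic test.

    The initial threshold is half the summary statistic of the observed
    training differences.  Four stages follow: a coarse distance scan,
    a threshold scan (80-120% of the initial value), a distance
    refinement, and a final threshold refinement (90-110%).
    """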
    db.resetOffsets()
    stest = functools.partial(summaryTest, summaryFunc)

    def trainAux(distance, threshold, num_trials):
        estimator = functools.partial(stest, {'distance':distance,'threshold':threshold}, greater)
        estimates = bootstrap3(estimator, db, 'train', unusual_case, num_observations, num_trials)
        null_estimates = bootstrap3(estimator, db, 'train_null', unusual_case, num_observations, num_trials)

        bad_estimates = len([e for e in estimates if e != 1])
        bad_null_estimates = len([e for e in null_estimates if e != 0])

        false_negatives = 100.0*bad_estimates/num_trials
        false_positives = 100.0*bad_null_estimates/num_trials
        return false_positives,false_negatives

    #determine expected delta based on differences
    mean_diffs = [s['unusual_packet']-s['other_packet'] for s in db.subseries('train', unusual_case)]
    threshold = summaryFunc(mean_diffs)/2.0
    #print("init_threshold:", threshold)

    wt = WorkerThreads(2, trainAux)

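    # Stage 1: coarse scan of candidate distances at the initial threshold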
    num_trials = 500
    performance = []
    for distance in range(1,50):
        wt.addJob(distance, (distance,threshold,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        performance.append(((fp+fn)/2.0, job_id, fn, fp))

    performance.sort()
    #pprint.pprint(performance)
    good_distance = performance[0][1]
    #print("good_distance:",good_distance)


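    # Stage 2: scan thresholds from 80% to 120% of the initial value,
    # minimizing |FP - FN|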
    num_trials = 500
    performance = []
    for t in range(80,122,2):
        wt.addJob(threshold*(t/100.0), (good_distance,threshold*(t/100.0),num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        #performance.append(((fp+fn)/2.0, job_id, fn, fp))
        performance.append((abs(fp-fn), job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    good_threshold = performance[0][1]
    #print("good_threshold:", good_threshold)


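    # Stage 3: refine the distance around good_distance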
    num_trials = 500
    performance = []
    for d in [good_distance+s for s in range(-4,5)
              if good_distance+s > -1 and good_distance+s < 51]:
        wt.addJob(d, (d,good_threshold,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        performance.append(((fp+fn)/2.0, job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_distance = performance[0][1]
    #print("best_distance:",best_distance)


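    # Stage 4: refine the threshold around good_threshold at the best distance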
    num_trials = 500
    performance = []
    for t in range(90,111):
        wt.addJob(good_threshold*(t/100.0), (best_distance,good_threshold*(t/100.0),num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        #performance.append(((fp+fn)/2.0, job_id, fn, fp))
        performance.append((abs(fp-fn), job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_threshold = performance[0][1]
    #print("best_threshold:", best_threshold)

    wt.stop()
    params = json.dumps({'distance':best_distance,'threshold':best_threshold}, sort_keys=True)
    return {'trial_type':"train",
            'num_observations':num_observations,
            'num_trials':num_trials,
            'params':params,
            'false_positives':performance[0][3],
            'false_negatives':performance[0][2]}


def trainKalman(db, unusual_case, greater, num_observations):
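    """Tune the threshold parameter for the Kalman filter test.

    The initial threshold is half the final kfilter estimate over the
    observed training differences; candidates from 90% to 110% of that
    value are then compared by bootstrap error rates.
    """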
    db.resetOffsets()

    def trainAux(params, num_trials):
        estimator = functools.partial(kalmanTest, params, greater)
        estimates = bootstrap3(estimator, db, 'train', unusual_case, num_observations, num_trials)
        null_estimates = bootstrap3(estimator, db, 'train_null', unusual_case, num_observations, num_trials)

        bad_estimates = len([e for e in estimates if e != 1])
        bad_null_estimates = len([e for e in null_estimates if e != 0])

        false_negatives = 100.0*bad_estimates/num_trials
        false_positives = 100.0*bad_null_estimates/num_trials
        return false_positives,false_negatives

    mean_diffs = [s['unusual_packet']-s['other_packet'] for s in db.subseries('train', unusual_case)]
    good_threshold = kfilter({},mean_diffs)['est'][-1]/2.0

    wt = WorkerThreads(2, trainAux)
    num_trials = 200
    performance = []
    for t in range(90,111):
        params = {'threshold':good_threshold*(t/100.0)}
        wt.addJob(good_threshold*(t/100.0), (params,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        #performance.append(((fp+fn)/2.0, job_id, fn, fp))
        performance.append((abs(fp-fn), job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_threshold = performance[0][1]
    #print("best_threshold:", best_threshold)
    params = {'threshold':best_threshold}

    wt.stop()

    return {'trial_type':"train",
            'num_observations':num_observations,
            'num_trials':num_trials,
            'params':json.dumps(params, sort_keys=True),
            'false_positives':performance[0][3],
            'false_negatives':performance[0][2]}


def trainTsval(db, unusual_case, greater, num_observations):
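    """Tune the threshold parameter for the tsvalwmean test.

    The initial threshold is the midpoint between the weighted means of
    the 'train' and 'train_null' series; candidates from 90% to 110% of
    that value are then compared by bootstrap error rates.
    """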
    db.resetOffsets()

    def trainAux(params, num_trials):
        estimator = functools.partial(tsvalwmeanTest, params, greater)
        estimates = bootstrap3(estimator, db, 'train', unusual_case, num_observations, num_trials)
        null_estimates = bootstrap3(estimator, db, 'train_null', unusual_case, num_observations, num_trials)

        bad_estimates = len([e for e in estimates if e != 1])
        bad_null_estimates = len([e for e in null_estimates if e != 0])

        false_negatives = 100.0*bad_estimates/num_trials
        false_positives = 100.0*bad_null_estimates/num_trials
        return false_positives,false_negatives

    train = db.subseries('train', unusual_case)
    null = db.subseries('train_null', unusual_case)
    good_threshold = (tsvalwmean(train)+tsvalwmean(null))/2.0

    wt = WorkerThreads(2, trainAux)
    num_trials = 200
    performance = []
    for t in range(90,111):
        params = {'threshold':good_threshold*(t/100.0)}
        wt.addJob(good_threshold*(t/100.0), (params,num_trials))
    wt.wait()
    while not wt.resultq.empty():
        job_id,errors = wt.resultq.get()
        fp,fn = errors
        #performance.append(((fp+fn)/2.0, job_id, fn, fp))
        performance.append((abs(fp-fn), job_id, fn, fp))
    performance.sort()
    #pprint.pprint(performance)
    best_threshold = performance[0][1]
    #print("best_threshold:", best_threshold)
    params = {'threshold':best_threshold}

    wt.stop()

    return {'trial_type':"train",
            'num_observations':num_observations,
            'num_trials':num_trials,
            'params':json.dumps(params, sort_keys=True),
            'false_positives':performance[0][3],
            'false_negatives':performance[0][2]}


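# Registry of available classifiers: each entry maps a name to its
# training function, its test function, and a list that accumulates
# training results.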
classifiers = {'boxtest':{'train':trainBoxTest, 'test':multiBoxTest, 'train_results':[]},
               'midsummary':{'train':functools.partial(trainSummary, midsummary), 'test':midsummaryTest, 'train_results':[]},
               'ubersummary':{'train':functools.partial(trainSummary, ubersummary), 'test':ubersummaryTest, 'train_results':[]},
               'quadsummary':{'train':functools.partial(trainSummary, quadsummary), 'test':quadsummaryTest, 'train_results':[]},
               'tsvalwmean':{'train':trainTsval, 'test':tsvalwmeanTest, 'train_results':[]},
               #'kalman':{'train':trainKalman, 'test':kalmanTest, 'train_results':[]},
               #'_trimean':{'train':None, 'test':trimeanTest, 'train_results':[]},
              }