########################################################
#
# Guanhua Yan (ghyan@binghamton.edu)
#
# Evaluate exploitability of binary executable programs.
#
########################################################

import os, sys, glob, subprocess, argparse, copy, random
from datetime import datetime
import bin_pca

vuln_map = {}
vuln_map['ReturnAv'] = 1
vuln_map['UseAfterFree'] = 2
vuln_map['SegFaultOnPc'] = 3
vuln_map['BranchAv'] = 4
vuln_map['StackCodeExecution'] = 5
vuln_map['StackBufferOverflow'] = 6
vuln_map['PossibleStackCorruption'] = 7
vuln_map['DestAv'] = 8
vuln_map['BadInstruction'] = 9
vuln_map['HeapError'] = 10
vuln_map['StackOverflow'] = 11
vuln_map['SegFaultOnPcNearNull'] = 12
vuln_map['BranchAvNearNull'] = 13
vuln_map['BlockMoveAv'] = 14
vuln_map['DestAvNearNull'] = 15
vuln_map['SourceAvNearNull'] = 16
vuln_map['FloatingPointException'] = 17
vuln_map['BenignSignal'] = 18
vuln_map['SourceAv'] = 19
vuln_map['AbortSignal'] = 20
vuln_map['AccessViolation'] = 21
vuln_map['UncategorizedSignal'] = 22
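# Note: these names appear to follow the CERT triage (!exploitable-style) crash
# categories reported by the fuzzers; the integer IDs are how the categories are
# encoded in the fuzzing-results input file.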


# create a vulnerability configuration map based on the CERT exploitability categories
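# Each non-comment line is expected to be comma-separated with two trailing
# integers; a hypothetical example line (values are illustrative only):
#   ReturnAv,3,10   ->  vuln_config_map['returnav'] = [3, 10]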
def createVulnConfigMap(vul_config_filename):
    vuln_config_map = {}
    fn = open(vul_config_filename, 'r')
    for line in fn:
        if len(line.strip()) == 0 or line[0] == '#':
            continue
        fields = line.rstrip().split(',')
        if len(fields) < 3:
            print "Wrong vulnerability configuration file"
            return None
        vuln_config_map[fields[0].lower()] = [int(fields[-2]), int(fields[-1])]
    fn.close()

    return vuln_config_map

# create a vulnerability-fuzzer map based on the CERT exploitability categories.
# per line: vulnerability type (or '*'), fuzzer (or '*'), ..., followed by two
# trailing integers (hit count, trial count)
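# A hypothetical example (values are illustrative only):
#   #vuln=ReturnAv,DestAv
#   #fuzzer=bff,ofuzz
#   ReturnAv,bff,3,10   ->  vuln_fuzzer_config_map[('returnav', 'bff')] = [3, 10]
#   *,ofuzz,1,20        ->  one entry per vuln listed on the #vuln= line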
def createVulnFuzzerConfigMap(vul_fuzzer_config_filename):
    vuln_fuzzer_config_map = {}
    #vuln_set = set([])
    #fuzzer_set = set([])

    fn = open(vul_fuzzer_config_filename, 'r')
    for line in fn:
        if len(line.strip()) == 0:
            continue
        if line[0] == '#':
            if line.find('=') != -1:
                fields = line.rstrip().split('=')
                if fields[0].lower() == "#vuln":
                    vuln_set = set(fields[1].split(','))
                elif fields[0].lower() == "#fuzzer":
                    fuzzer_set = set(fields[1].split(','))
                continue
            else:
                continue

        fields = line.rstrip().split(',')
        if len(fields) < 4:
            print "Wrong vulnerability configuration file"
            return None
        if fields[0] == '*':
            my_vuln_set = vuln_set
        else:
            my_vuln_set = set([fields[0]])

        if fields[1] == '*':
            my_fuzzer_set = fuzzer_set
        else:
            my_fuzzer_set = set([fields[1]])

        print "checking...", my_vuln_set, my_fuzzer_set
        for vuln in my_vuln_set:
            for fuzzer in my_fuzzer_set:
                vuln_fuzzer_config_map[(vuln.lower(), fuzzer.lower())] = [int(fields[-2]), int(fields[-1])]
    fn.close()

    return vuln_fuzzer_config_map

# for each vuln, use the fraction of previously tested apps found positive for it as the prior
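# (worked example: with prior_a = 1, prior_b = 2, n apps tested so far and k of
#  them positive for the vuln, the prior belief that the app does NOT have the
#  vuln is 1 - (1 + k)/(2 + n); with k = n = 0 this gives 0.5, matching the
#  comment at the call site)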
def evaluate_prior_belief(vuln_hit_map, total_apps_tested, prior_a, prior_b):
    vuln_prob_map = {}
    for vuln in vuln_hit_map.keys():
        vuln_prob_map[vuln] = 1.0 - 1.0 * (prior_a + len(vuln_hit_map[vuln])) / (prior_b + total_apps_tested) # no previous apps tested
        print "prior:", vuln, vuln_prob_map[vuln], prior_a, prior_b, total_apps_tested, len(vuln_hit_map[vuln])
    
    return vuln_prob_map
    

# evaluate the posterior belief with machine learning
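# vuln_prob_map[v] stores P(app does NOT have v). For a positive prediction the
# update below applies Bayes' rule with rates estimated from ml_perf_map
# ([TP, FP, TN, FN]):
#   P(not v | +) = P(+|not v) P(not v) / [ P(+|not v) P(not v) + P(+|v) P(v) ]
# where P(+|not v) = FP/(FP+TN) and P(+|v) = TP/(TP+FN); the negative-prediction
# case is symmetric. Note: feature_type_list is the module-level list set in __main__.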
def evaluate_posterior_belief_with_ml(vuln_prob_map, apppath, cur_clf_map, empty_feature_map, fuzz_vuln_set, ml_perf_map):
    print "evaluate_prior_belief", vuln_prob_map, apppath, cur_clf_map, empty_feature_map
    feature_vec = bin_pca.collect_feature_values(apppath, feature_type_list, copy.copy(empty_feature_map), True)
    print "len of feature_vec is", len(feature_vec)

    for vuln in vuln_prob_map.keys():
        clf = cur_clf_map[vuln]
        if clf is None:
            continue

        # the prediction result from the classification model
        target = clf.predict([feature_vec])[0]
        prior = vuln_prob_map[vuln]
        print "ML prediction: target, prior =", target, prior

        if target:
            # prediction is positive: the classifier believes the app has this vuln

            # update the posterior belief if the counters allow estimating the rates
            if ml_perf_map[vuln][1] + ml_perf_map[vuln][2] > 0 and ml_perf_map[vuln][0] + ml_perf_map[vuln][3] > 0:
                false_positive_rate = 1.0 * ml_perf_map[vuln][1] / (ml_perf_map[vuln][1] + ml_perf_map[vuln][2])
                upper = prior * false_positive_rate
                bottom = upper + (1.0 - prior) * ml_perf_map[vuln][0] / (ml_perf_map[vuln][0] + ml_perf_map[vuln][3])
                if bottom > 0:
                    vuln_prob_map[vuln] = upper / bottom

            # update performance counters of the machine learning predictor
            if vuln in fuzz_vuln_set:
                # prediction is correct
                ml_perf_map[vuln][0] += 1 # true positive
            else:
                # prediction positive, fuzzing negative
                ml_perf_map[vuln][1] += 1 # false positive
        else:
            # prediction is negative: the classifier believes the app does not have this vuln

            # update the posterior belief if the counters allow estimating the rates
            if ml_perf_map[vuln][1] + ml_perf_map[vuln][2] > 0 and ml_perf_map[vuln][0] + ml_perf_map[vuln][3] > 0:
                true_negative_rate = 1.0 * ml_perf_map[vuln][2] / (ml_perf_map[vuln][2] + ml_perf_map[vuln][1])
                upper = prior * true_negative_rate
                bottom = upper + (1.0 - prior) * ml_perf_map[vuln][3] / (ml_perf_map[vuln][0] + ml_perf_map[vuln][3])
                if bottom > 0:
                    vuln_prob_map[vuln] = upper / bottom

            # update performance counters of the machine learning predictor
            if vuln in fuzz_vuln_set:
                # prediction is wrong
                ml_perf_map[vuln][3] += 1 # false negative
            else:
                # prediction negative, fuzzing negative
                ml_perf_map[vuln][2] += 1 # true negative

    return

# calculate \prod (1 - q(r,v)) over the given fuzzer; used in Eq. (8)
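# q(r,v) is estimated from the configuration as val[0]/val[1] (times fuzzer r
# found vuln v / trials), so the returned product is the probability that the
# given fuzzer misses vuln v in an app that actually has it.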
def calc_product_q(vuln_fuzzer_config_map, vuln, fuzzer):
    product = 1.0

    vuln_fuzzer = (vuln.lower(), fuzzer.lower())
    if vuln_fuzzer in vuln_fuzzer_config_map.keys():
        val =  vuln_fuzzer_config_map[vuln_fuzzer] 
        product *= 1 - 1.0 * val[0] / val[1]
    else:
        print "Can't find vuln-fuzzer pair in the configuration file"
        system.exit()
        
    #print product
    return product

# Eq. (9)
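# As implemented below, with P_v = vuln_prob_map[v] = P(app does NOT have v) and
# r_v = a_v / (a_v + b_v) taken from vuln_config_map:
#   exploitability = 1 - prod_v [ 1 - r_v + r_v * P_v ] = 1 - prod_v [ 1 - r_v * (1 - P_v) ]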
def calc_exploitability(vuln_prob_map, vuln_config_map):
    product = 1.0
    for vuln in vuln_prob_map.keys():
        a_b = vuln_config_map[vuln]
        r_v = 1.0 * a_b[0] / (a_b[0] + a_b[1])
        product *= 1 - r_v + r_v * vuln_prob_map[vuln]
        #print "calc_exploitability", vuln, a_b, r_v, product
    return 1 - product

def evaluate_posterior_belief_with_fuzzing(app, fuzzer, vuln_found_map, vuln_config_map, vuln_fuzzer_config_map, vuln_prob_map):
    #vuln_prob_map = evaluate_prior_belief(app, vuln_config_map.keys(), cur_w_list)
    for vuln in vuln_config_map.keys():
        if fuzzer in vuln_found_map[vuln]:
            vuln_prob_map[vuln] = 0.0
        else:
            prior = vuln_prob_map[vuln]
            product = calc_product_q(vuln_fuzzer_config_map, vuln, fuzzer)
            # Bayes update when the fuzzer found nothing: the denominator weighs the
            # "vulnerable" case by the probability that the fuzzer misses the vuln (product)
            posterior = prior / (prior + (1 - prior) * product)
            if app == 'abiword':
                print "abiword!: vuln, prior, product, posterior =", vuln, prior, product, posterior

            vuln_prob_map[vuln] = posterior
        #print vuln, vuln_prob_map[vuln]

        print "Posterior belief: ", app, vuln, vuln_prob_map[vuln]

    return
    #exploitability = calc_exploitability(vuln_prob_map, vuln_config_map)
    #return exploitability

def update_vuln_fuzzer_configmap(fuzzer_list, vuln_found_map, vuln_fuzzer_config_map):
    for vuln in vuln_found_map.keys():
        if len(vuln_found_map[vuln]) == 0:
            continue
        for fuzzer in fuzzer_list:
            vuln_fuzzer_config_map[(vuln, fuzzer)][1] += 1
            if fuzzer in vuln_found_map[vuln]:
                # fuzzer has found this vulnerability type
                vuln_fuzzer_config_map[(vuln, fuzzer)][0] += 1
                #print "Updating vuln_fuzzer_config_map..."
    return

if __name__ == "__main__":    
    parser = argparse.ArgumentParser(description="Generate a per-vulnerability score table for a software package")

    parser.add_argument("vuln_config", help="configuration file for vulnerability")
    parser.add_argument("vuln_fuzzer_config", help="configuration file for vulnerability and fuzzer")
    parser.add_argument("inputfile", help="input file that contains the fuzzing results")
    parser.add_argument("scorefile", help="the output file that stores the score per vulnerability")
    #parser.add_argument("pcatrainpath", help="where the features are for pca training")
    #parser.add_argument("ncomponents", help="the number of principal components")
    parser.add_argument("apppath", help="where the features of the apps are located")
    #parser.add_argument("default_prior", help="the default prior belief")
    #parser.add_argument("prior_scale", help="the scale of prior (0 - 1)")
    parser.add_argument("epoch_length", help="the length of each epoch")
    parser.add_argument("classifier", help="the classifier name")
    parser.add_argument("feature_types", help="types of features used")
    parser.add_argument("random_seed", help="the random seeds used. if negative, then time is used.")
    parser.add_argument("conf_threshold", help="the threshold for confidence")
    parser.add_argument("output_file", help="the output filename")
    args = parser.parse_args()    
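    # A hypothetical invocation (script name, paths, classifier name, and feature
    # types are illustrative only; actual values depend on bin_pca):
    #   python evaluate_exploitability.py vuln.cfg vuln_fuzzer.cfg fuzz_results.txt \
    #       scores.csv ./app_features 10 RandomForest byte,opcode -1 0.5 accuracy.txt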

    #(pca, empty_feature_map) = bin_pca.train_pca_model(args.pcatrainpath, int(args.ncomponents))
    #print "size of empty_feature_map is", len(empty_feature_map)

    random_seed = int(args.random_seed)
    if random_seed < 0:
        random.seed(datetime.now())
    else:
        random.seed(random_seed)

    epoch_length = int(args.epoch_length)
    clf_name = args.classifier
    feature_type_list = args.feature_types.split(',')
    conf_threshold = float(args.conf_threshold)
    
    # the global list of vulnerability types
    vuln_config_map = createVulnConfigMap(args.vuln_config)
    vuln_fuzzer_config_map = createVulnFuzzerConfigMap(args.vuln_fuzzer_config)

    print "vuln_config_map =", vuln_config_map
    print "vuln_fuzzer_config_map =", vuln_fuzzer_config_map
    #sys.exit()

    cur_clf_map = None
    vuln_hit_map = {}
    target_vuln_map = {}
    ml_perf_map = {}
    reverse_vuln_map = {}

    for key in vuln_map.keys():
        reverse_vuln_map[ vuln_map[key] ] = key
        print vuln_map[key], key
        ml_perf_map[key.lower()] = [0, 0, 0, 0]  # true positive, false positive, true negative, false negative
        vuln_hit_map[key.lower()] = set([])
        target_vuln_map[key.lower()] = []
        
    cnt = 0
    cur_scorefile = ""

    # prediction accuracy measurement (individual)
    prediction_prior_map = {}       # performance of prediction based on prior info (percentage of software that has a vulnerability type)
    prediction_ml_map = {}          # performance of prediction based on machine learning
    prediction_prior_ml_map = {}    # performance of prediction based on prior + machine learning
    prediction_prior_ml_fuzzer_map = {}    # performance of prediction based on prior + machine learning + one fuzzer
    prediction_bff_map = {}         # performance of prediction based on bff
    prediction_ofuzz_map = {}       # performance of prediction based on ofuzz
    for key in vuln_map.keys():
        prediction_prior_map[key.lower()] = [ 0, 0, 0, 0 ]   # true positive, false positive, true negative, false negative
        prediction_ml_map[key.lower()] = [ 0, 0, 0, 0 ]
        prediction_prior_ml_map[key.lower()] = [ 0, 0, 0, 0 ]
        prediction_prior_ml_fuzzer_map[key.lower()] = [ 0, 0, 0, 0 ]
        prediction_bff_map[key.lower()] = [ 0, 0, 0, 0 ]
        prediction_ofuzz_map[key.lower()] = [ 0, 0, 0, 0 ]
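
    # shuffle the input lines (decorate-sort-undecorate with random keys) so that
    # the apps are processed in a random order determined by the seed chosen above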
        
    with open(args.inputfile,'r') as source:
        data = [ (random.random(), line) for line in source ]
    data.sort()
    random_inputfile = args.inputfile + '.random'
    with open(random_inputfile, 'w') as target:
        for _, line in data:
            target.write( line )

    fuzzer_list = [ 'bff', 'ofuzz' ]
    inputfn = open(random_inputfile, 'r')
    apppath_list = []
    epoch_id = -1
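    # Each line of the fuzzing-results file is assumed to be '&'-separated:
    # fields[0] is the app name, fields[2] holds the comma-separated vuln IDs found
    # by bff, and fields[4] those found by ofuzz (the other fields are not used here).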

    for line in inputfn:
        cnt += 1
        if cnt % epoch_length == 1:
            #cur_scorefile = args.scorefile + "." + str(cnt) + "-" + str(cnt - 1 + 10)
            epoch_id = int(cnt / epoch_length)
            cur_scorefile = args.scorefile + ".append"
            #print >> output_scorefile_fn, "#software",
            if epoch_id == 0:
                output_scorefile_fn = open(cur_scorefile, 'w+') # write mode
                output_scorefile_fn.write("#epoch,software")
                for vuln in vuln_config_map.keys():
                    output_scorefile_fn.write("," + vuln)
                output_scorefile_fn.write('\n')
            else:
                output_scorefile_fn = open(cur_scorefile, 'a+') # append mode
                
        fields = line.rstrip().split('&')
        app = fields[0]
        apppath = os.path.join(args.apppath, app)
        apppath_list.append(apppath)
        vuln_found_map = {}

        for key in vuln_map.keys():
            vuln_found_map[key.lower()] = set([])
        
        bff_results = fields[2].split(',')
        hit_vuln_set = set([])
        for vuln_id in bff_results:
            if len(vuln_id) <= 0:
                continue
            vuln = reverse_vuln_map[int(vuln_id)].lower()
            hit_vuln_set.add( vuln )
            if vuln in vuln_found_map.keys():
                vuln_found_map[ vuln ].add('bff')
            else:
                vuln_found_map[ vuln ] = set(['bff'])
        ofuzz_results = fields[4].split(',')
        for vuln_id in ofuzz_results:
            if len(vuln_id) <= 0:
                continue
            vuln = reverse_vuln_map[int(vuln_id)].lower()
            hit_vuln_set.add(vuln)
            if vuln in vuln_found_map.keys():
                vuln_found_map[ vuln ].add('ofuzz')
            else:
                vuln_found_map[ vuln ] = set(['ofuzz'])

        # update the per-fuzzer predictions (each fuzzer alone vs. the combined fuzzing results)
        for vuln in vuln_found_map.keys():
            # fuzzer 1 (bff)
            if not 'bff' in vuln_found_map[vuln]:
                # not vulnerable, prediction is negative.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_bff_map[vuln][3] += 1 # false negative
                else:
                    # fuzzing says it is negative
                    prediction_bff_map[vuln][2] += 1 # true negative
            else:
                # vulnerable, prediction is positive.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_bff_map[vuln][0] += 1 # true positive
                else:
                    # fuzzing says it is negative
                    prediction_bff_map[vuln][1] += 1 # false positive

            # fuzzer 2 (ofuzz)
            if not 'ofuzz' in vuln_found_map[vuln]:
                # not vulnerable, prediction is negative.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_ofuzz_map[vuln][3] += 1 # false negative
                else:
                    # fuzzing says it is negative
                    prediction_ofuzz_map[vuln][2] += 1 # true negative
            else:
                # vulnerable, prediction is positive.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_ofuzz_map[vuln][0] += 1 # true positive
                else:
                    # fuzzing says it is negative
                    prediction_ofuzz_map[vuln][1] += 1 # false positive
                
        for vuln in hit_vuln_set:
            vuln_hit_map[vuln].add(app)
        for vuln in vuln_hit_map.keys():
            if vuln in hit_vuln_set:
                target_vuln_map[vuln].append(True)
            else:
                target_vuln_map[vuln].append(False)

        # calculate the prior beliefs
        vuln_prob_map = evaluate_prior_belief(vuln_hit_map, cnt - 1, 1, 2) # prior_a = 1, prior_b = 2, so 0.5 for the first app
        exploitability = calc_exploitability(vuln_prob_map, vuln_config_map)
        print "EXPLOITABILITY prior ", app, exploitability
        
        # update the prediction based on prior
        for vuln in vuln_prob_map.keys():
            if vuln_prob_map[vuln] > conf_threshold:
                # not vulnerable, prediction is negative.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_prior_map[vuln][3] += 1 # false negative
                else:
                    # fuzzing says it is negative
                    prediction_prior_map[vuln][2] += 1 # true negative
            else:
                # vulnerable, prediction is positive.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_prior_map[vuln][0] += 1 # true positive
                else:
                    # fuzzing says it is negative
                    prediction_prior_map[vuln][1] += 1 # false positive
                    
        # for the first epoch, no ML model is available yet.
        if cur_clf_map is not None:
            evaluate_posterior_belief_with_ml(vuln_prob_map, apppath, cur_clf_map, empty_feature_map, hit_vuln_set, ml_perf_map)
            
        exploitability = calc_exploitability(vuln_prob_map, vuln_config_map)
        print "EXPLOITABILITY ML", app, exploitability
            
            
        # update the prediction based on prior+ml
        for vuln in vuln_prob_map.keys():
            if vuln_prob_map[vuln] > conf_threshold:
                # not vulnerable, prediction is negative.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_prior_ml_map[vuln][3] += 1 # false negative
                else:
                    # fuzzing says it is negative
                    prediction_prior_ml_map[vuln][2] += 1 # true negative
            else:
                # vulnerable, prediction is positive.
                if vuln in hit_vuln_set:
                    # fuzzing says it is positive
                    prediction_prior_ml_map[vuln][0] += 1 # true positive
                else:
                    # fuzzing says it is negative
                    prediction_prior_ml_map[vuln][1] += 1 # false positive

            
        # bayesian evaluation
        #score = evaluate_posterior_belief_with_fuzzing(app, fuzzer_list, vuln_found_map, vuln_config_map, vuln_fuzzer_config_map, vuln_prob_map)
        one_fuzzer = 0
        for fuzzer in fuzzer_list:
            evaluate_posterior_belief_with_fuzzing(app, fuzzer, vuln_found_map, vuln_config_map, vuln_fuzzer_config_map, vuln_prob_map)
            exploitability = calc_exploitability(vuln_prob_map, vuln_config_map)
            print "EXPLOITABILITY", fuzzer, app, exploitability
            
            if one_fuzzer == 0:
                for vuln in vuln_prob_map.keys():
                    if vuln_prob_map[vuln] > conf_threshold:
                        # not vulnerable, prediction is negative.
                        if vuln in hit_vuln_set:
                            # fuzzing says it is positive
                            prediction_prior_ml_fuzzer_map[vuln][3] += 1 # false negative
                        else:
                            # fuzzing says it is negative
                            prediction_prior_ml_fuzzer_map[vuln][2] += 1 # true negative
                    else:
                        # vulnerable, prediction is positive.
                        if vuln in hit_vuln_set:
                            # fuzzing says it is positive
                            prediction_prior_ml_fuzzer_map[vuln][0] += 1 # true positive
                        else:
                            # fuzzing says it is negative
                            prediction_prior_ml_fuzzer_map[vuln][1] += 1 # false positive
            
            one_fuzzer += 1
            
        # update the score file
        output_scorefile_fn.write(str(epoch_id) + ",")
        output_scorefile_fn.write(app)
        for vuln in vuln_config_map.keys():
            output_scorefile_fn.write("," + str(vuln_prob_map[vuln]))
        output_scorefile_fn.write('\n')

        #print "Quantitative score of app", app, "is", score
        update_vuln_fuzzer_configmap(fuzzer_list, vuln_found_map, vuln_fuzzer_config_map)
        if cnt % 10 == 0:
            print cnt, "vuln_fuzzer_config_map =", vuln_fuzzer_config_map

        # check if we need to retrain the regression model
        if cnt % epoch_length == 0:            
            # close the score file
            output_scorefile_fn.close()
            
            # train the classification model
            (cur_clf_map, empty_feature_map) = bin_pca.train_classification_model(apppath_list, target_vuln_map, clf_name, feature_type_list)


    # eventually report prediction accuracy
    print "================= Prediction Accuracy ==================="
    #for vuln in prediction_prior_map.keys():

    output_fn = open(args.output_file, 'w')
    
    for vuln_id in reverse_vuln_map.keys():
        #reverse_vuln_map[ vuln_map[key] ] = key

        vuln = reverse_vuln_map[ vuln_id ].lower()
        # prediction performance of prior
        if prediction_prior_map[vuln][0] + prediction_prior_map[vuln][1] > 0 and prediction_prior_map[vuln][0] + prediction_prior_map[vuln][3] > 0 and prediction_prior_map[vuln][0] > 0:
            prior_precision = 1.0 * prediction_prior_map[vuln][0] / (prediction_prior_map[vuln][0] + prediction_prior_map[vuln][1])
            prior_recall = 1.0 * prediction_prior_map[vuln][0] / (prediction_prior_map[vuln][0] + prediction_prior_map[vuln][3])
            prior_f1 = 2 * ((prior_precision * prior_recall)/(prior_precision + prior_recall))
            print >> output_fn, vuln_id, vuln, "prior: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_map[vuln][0], prediction_prior_map[vuln][1], prediction_prior_map[vuln][2], prediction_prior_map[vuln][3], prior_precision, prior_recall, prior_f1
        else:
            print >> output_fn, vuln_id, vuln, "prior: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_map[vuln][0], prediction_prior_map[vuln][1], prediction_prior_map[vuln][2], prediction_prior_map[vuln][3], 'None', 'None', 'None'

        """
        # prediction performance of ML
        if ml_perf_map[vuln][0] + ml_perf_map[vuln][1] > 0 and ml_perf_map[vuln][0] + ml_perf_map[vuln][3] > 0 and ml_perf_map[vuln][0] > 0:
            ml_precision = 1.0 * ml_perf_map[vuln][0] / (ml_perf_map[vuln][0] + ml_perf_map[vuln][1])
            ml_recall = 1.0 * ml_perf_map[vuln][0] / (ml_perf_map[vuln][0] + ml_perf_map[vuln][3])
            ml_f1 = 2 * ((ml_precision * ml_recall)/(ml_precision + ml_recall))
            print vuln_id, vuln, "ML: (TP, FP, TN, FN, precision, recall, F1) =", ml_perf_map[vuln][0], ml_perf_map[vuln][1], ml_perf_map[vuln][2], ml_perf_map[vuln][3], ml_precision, ml_recall, ml_f1
        else:
            print vuln_id, vuln, "ML: (TP, FP, TN, FN) =", ml_perf_map[vuln][0], ml_perf_map[vuln][1], ml_perf_map[vuln][2], ml_perf_map[vuln][3]
        """

        # prediction performance of prior_ml
        if prediction_prior_ml_map[vuln][0] + prediction_prior_ml_map[vuln][1] > 0 and prediction_prior_ml_map[vuln][0] + prediction_prior_ml_map[vuln][3] > 0 and prediction_prior_ml_map[vuln][0] > 0:
            prior_ml_precision = 1.0 * prediction_prior_ml_map[vuln][0] / (prediction_prior_ml_map[vuln][0] + prediction_prior_ml_map[vuln][1])
            prior_ml_recall = 1.0 * prediction_prior_ml_map[vuln][0] / (prediction_prior_ml_map[vuln][0] + prediction_prior_ml_map[vuln][3])
            prior_ml_f1 = 2 * ((prior_ml_precision * prior_ml_recall)/(prior_ml_precision + prior_ml_recall))
            print >> output_fn, vuln_id, vuln, "prior_ML: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_ml_map[vuln][0], prediction_prior_ml_map[vuln][1], prediction_prior_ml_map[vuln][2], prediction_prior_ml_map[vuln][3], prior_ml_precision, prior_ml_recall, prior_ml_f1
        else:
            print >> output_fn, vuln_id, vuln, "prior_ML: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_ml_map[vuln][0], prediction_prior_ml_map[vuln][1], prediction_prior_ml_map[vuln][2], prediction_prior_ml_map[vuln][3], 'None', 'None', 'None'

        # prediction performance of prior_ml_fuzzer
        if prediction_prior_ml_fuzzer_map[vuln][0] + prediction_prior_ml_fuzzer_map[vuln][1] > 0 and prediction_prior_ml_fuzzer_map[vuln][0] + prediction_prior_ml_fuzzer_map[vuln][3] > 0 and prediction_prior_ml_fuzzer_map[vuln][0] > 0:
            prior_ml_fuzzer_precision = 1.0 * prediction_prior_ml_fuzzer_map[vuln][0] / (prediction_prior_ml_fuzzer_map[vuln][0] + prediction_prior_ml_fuzzer_map[vuln][1])
            prior_ml_fuzzer_recall = 1.0 * prediction_prior_ml_fuzzer_map[vuln][0] / (prediction_prior_ml_fuzzer_map[vuln][0] + prediction_prior_ml_fuzzer_map[vuln][3])
            prior_ml_fuzzer_f1 = 2 * ((prior_ml_fuzzer_precision * prior_ml_fuzzer_recall)/(prior_ml_fuzzer_precision + prior_ml_fuzzer_recall))
            print >> output_fn, vuln_id, vuln, "prior_ML_fuzzer: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_ml_fuzzer_map[vuln][0], prediction_prior_ml_fuzzer_map[vuln][1], prediction_prior_ml_fuzzer_map[vuln][2], prediction_prior_ml_fuzzer_map[vuln][3], prior_ml_fuzzer_precision, prior_ml_fuzzer_recall, prior_ml_fuzzer_f1
        else:
            print >> output_fn, vuln_id, vuln, "prior_ml_fuzzer: (TP, FP, TN, FN, precision, recall, F1) =", prediction_prior_ml_fuzzer_map[vuln][0], prediction_prior_ml_fuzzer_map[vuln][1], prediction_prior_ml_fuzzer_map[vuln][2], prediction_prior_ml_fuzzer_map[vuln][3]
            
        # prediction performance of bff
        if prediction_bff_map[vuln][0] + prediction_bff_map[vuln][1] > 0 and prediction_bff_map[vuln][0] + prediction_bff_map[vuln][3] > 0 and prediction_bff_map[vuln][0] > 0:
            bff_precision = 1.0 * prediction_bff_map[vuln][0] / (prediction_bff_map[vuln][0] + prediction_bff_map[vuln][1])
            bff_recall = 1.0 * prediction_bff_map[vuln][0] / (prediction_bff_map[vuln][0] + prediction_bff_map[vuln][3])
            bff_f1 = 2 * ((bff_precision * bff_recall)/(bff_precision + bff_recall))
            print >> output_fn, vuln_id, vuln, "bff: (TP, FP, TN, FN, precision, recall, F1) =", prediction_bff_map[vuln][0], prediction_bff_map[vuln][1], prediction_bff_map[vuln][2], prediction_bff_map[vuln][3], bff_precision, bff_recall, bff_f1
        else:
            print >> output_fn, vuln_id, vuln, "bff: (TP, FP, TN, FN, precision, recall, F1) =", prediction_bff_map[vuln][0], prediction_bff_map[vuln][1], prediction_bff_map[vuln][2], prediction_bff_map[vuln][3], 'None', 'None', 'None'

        # prediction performance of ofuzz
        if prediction_ofuzz_map[vuln][0] + prediction_ofuzz_map[vuln][1] > 0 and prediction_ofuzz_map[vuln][0] + prediction_ofuzz_map[vuln][3] > 0 and prediction_ofuzz_map[vuln][0] > 0:
            ofuzz_precision = 1.0 * prediction_ofuzz_map[vuln][0] / (prediction_ofuzz_map[vuln][0] + prediction_ofuzz_map[vuln][1])
            ofuzz_recall = 1.0 * prediction_ofuzz_map[vuln][0] / (prediction_ofuzz_map[vuln][0] + prediction_ofuzz_map[vuln][3])
            ofuzz_f1 = 2 * ((ofuzz_precision * ofuzz_recall)/(ofuzz_precision + ofuzz_recall))
            print >> output_fn, vuln_id, vuln, "ofuzz: (TP, FP, TN, FN, precision, recall, F1) =", prediction_ofuzz_map[vuln][0], prediction_ofuzz_map[vuln][1], prediction_ofuzz_map[vuln][2], prediction_ofuzz_map[vuln][3], ofuzz_precision, ofuzz_recall, ofuzz_f1
        else:
            print >> output_fn, vuln_id, vuln, "ofuzz: (TP, FP, TN, FN, precision, recall, F1) =", prediction_ofuzz_map[vuln][0], prediction_ofuzz_map[vuln][1], prediction_ofuzz_map[vuln][2], prediction_ofuzz_map[vuln][3], 'None', 'None', 'None'

        print "----------------------------------------------------------------"


    output_fn.close()
