diff --git a/Collective-Risk_Dilemma.py b/Collective-Risk_Dilemma.py
deleted file mode 100755
index e8bc02c750a20c694d5189a2842b2b1b8ac5bedd..0000000000000000000000000000000000000000
--- a/Collective-Risk_Dilemma.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Imports ______________________________________________________
-
-import matplotlib.pyplot as plt
-import numpy as np
-import math
-
-
-# Constants ____________________________________________________
-
-N = 100  # Size of the population
-M = 6  # Number of individuals sampled from the population for each game
-R = 10  # Total number of rounds in one game
-T = M * R  # Target sum for the common pool
-GEN = 10  # Number of generations
-G = 1000  # Number of games per generation
-PRISK = 0.9  # Probability of losing the remaining endowment if the target is missed
-BETA = 1  # Intensity of selection (used in the fitness function)
-MU = 0.03  # Error (mutation) probability
-SIGMA = 0.15  # Standard deviation for Gaussian noise on thresholds
-DELTA = 0.0  # Interest rate applied to the common pool after each round
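-
-# Note (added): with T = M * R, the target equals every selected player
-# contributing exactly 1 unit per round, i.e. the "fair share" profile.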
-
-# Code _________________________________________________________
-
-class Collective_Risk_Dilemma:
-    def __init__(self, prisk=PRISK, sigma=SIGMA):
-        self.prisk = prisk
-        self.sigma = sigma
-
-        self.payoffs = [[] for i in range(N)]
-        self.fitness = [0] * N
-        self.commonPool = 0
-        self.moneyGiven = [[] for i in range(M)]
-
-
-        # Tracks, at the end of each game (not each round), how the players'
-        # total contributions C are distributed. Ideally every player would
-        # invest 1 per round, putting everyone in cEqualsR; in practice we
-        # count free riders (C = 0), fair sharers (C = R), altruists (C > R)
-        # and under-contributors (0 < C < R).
-        self.cContributions = [0]*4  # [cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR]
-
-        # Set players' strategies: each round's contribution is 0 (defect),
-        # 1 (fair share) or 2 (altruistic); [..., 0] applies while the pool
-        # is below the player's threshold, [..., 1] once it is reached.
-        self.contributions = np.random.choice(3, size=(N, R, 2))
-
-        # An (N, R) array of per-round thresholds, drawn uniformly from [0, 1).
-        self.threshold = np.random.random(size=(N, R))
-
-    def ComputePayoff(self, invested):
-        """
-            Computes a player's payoff at the end of a game: the remaining
-            endowment is kept if the target was reached, and otherwise only
-            with probability 1 - prisk.
-        """
-        if self.commonPool >= T or np.random.random() < 1 - self.prisk:
-            return (2 * R) - invested
-        else:
-            return 0
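-
-    # Added note (illustration only): under this rule, the expected payoff
-    # of a player who invested c in a game that missed the target is
-    # (1 - prisk) * (2 * R - c); e.g. with prisk = 0.9, R = 10 and c = 10,
-    # the expectation is 0.1 * 10 = 1, versus a guaranteed 10 when the
-    # target is reached.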
-
-    def SelectPlayers(self):
-        """
-            Randomly select M distinct players from the population of size N.
-        """
-        return np.random.choice(N, M, replace=False)
-
-    def UpdatePopulation(self):
-        """
-            The next generation is selected using the Wright-Fisher process
-            where the individual’s fitness is used to weigh the probability of
-            choosing an individual for the new population.
-        """
-        total_fitness = sum(self.fitness)
-        prob = [f / total_fitness for f in self.fitness]
-        reproduction_selection = np.random.choice(N, N, True, prob)
-        temp_contributions = np.zeros((N,R,2))
-        temp_threshold = np.zeros((N,R))
-        for i, player in enumerate(reproduction_selection):  # Generate errors
-            for r in range(R):
-                if np.random.random() < MU:  # Error on the below-threshold contribution
-                    temp_contributions[i,r,0] = np.random.choice(3)
-                else:
-                    temp_contributions[i,r,0] = np.copy(self.contributions[player,r,0])
-                if np.random.random() < MU:  # Error on the above-threshold contribution
-                    temp_contributions[i,r,1] = np.random.choice(3)
-                else:
-                    temp_contributions[i,r,1] = np.copy(self.contributions[player,r,1])
-                if np.random.random() < MU:  # Error on threshold
-                    temp_threshold[i][r] = np.random.normal(self.threshold[player,r], self.sigma)
-                else:
-                    temp_threshold[i][r] = np.copy(self.threshold[player,r])
-        self.contributions = np.copy(temp_contributions)
-        self.threshold = np.copy(temp_threshold)
-        self.payoffs = [[] for i in range(N)]
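-
-    # Illustrative sketch (added): the Wright-Fisher step above is a
-    # fitness-proportional draw with replacement, so the offspring counts
-    # are multinomially distributed; an equivalent standalone draw would be
-    #
-    #     probs = np.asarray(fitness) / np.sum(fitness)
-    #     offspring_counts = np.random.multinomial(N, probs)  # sums to N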
-
-    def Play(self):
-        """
-            Plays 1 Game  (R rounds)
-        """
-        selectedPlayers = self.SelectPlayers()  # Index of the player in the whole population (M)
-        moneyPlayerOwns = [2 * R] * M  # An individual player starts each game with an initial endowment of 2R
-        c = [0] * M  # Total Investment of each player
-        self.moneyGiven = [[] for i in range(M)]  # One empty list per selected player
-        self.commonPool = 0
-        for r in range(R):  # For each round
-            if self.commonPool < T:  # If the goal was not reached yet: contribute; else do nothing
-                for i, player in enumerate(selectedPlayers):
-                    # Index 0 holds the contribution used while the pool is
-                    # below the player's threshold, index 1 once it is reached.
-                    k = 0 if self.commonPool / T < self.threshold[player, r] else 1
-                    # Contribute the planned amount, or whatever money is left.
-                    amount = min(self.contributions[player, r, k], moneyPlayerOwns[i])
-                    c[i] += amount
-                    self.commonPool += amount
-                    self.moneyGiven[i].append(amount)
-                    moneyPlayerOwns[i] -= amount
-            self.commonPool *= 1 + DELTA  # Apply interest after each round
-
-        for i, player in enumerate(selectedPlayers):
-            self.payoffs[player].append(self.ComputePayoff(c[i]))
-
-        # cContributions = [cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR]
-        for i in range(M):
-            if c[i] == 0:
-                self.cContributions[0] += 1
-            elif c[i] == R:
-                self.cContributions[1] += 1
-            elif c[i] > R:
-                self.cContributions[2] += 1
-            else:  # 0 < c[i] < R
-                self.cContributions[3] += 1
-
-
-        if self.commonPool >= T: return 1  # Target reached
-        else: return 0
-
-
-    def GetAverageFitness(self):
-        return np.mean(self.fitness)
-
-
-    def GetAveragePayoff(self):
-        """
-            Mean payoff per player, averaged over the games each player
-            took part in (players with no games this generation count as 0).
-        """
-        per_player = [np.mean(p) if p else 0 for p in self.payoffs]
-        return np.mean(per_player)
-
-    def GetCommonPool(self):
-        return self.commonPool
-
-    def GetcContributions(self):
-        return self.cContributions
-
-
-
-    def UpdateFitness(self):
-        """
-            Sets each player's fitness to exp(BETA * mean payoff), where
-            BETA is the intensity of selection.
-        """
-        for n in range(N):
-            # Players never selected this generation have no payoffs;
-            # treat their mean payoff as 0 (fitness 1).
-            mean_payoff = np.mean(self.payoffs[n]) if self.payoffs[n] else 0
-            self.fitness[n] = math.exp(BETA * mean_payoff)
-
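-    # Note (added): BETA is the intensity of selection in the exponential
-    # mapping fitness = exp(BETA * mean_payoff): BETA -> 0 approaches
-    # neutral drift, while large BETA concentrates offspring on the
-    # highest-payoff players.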
-
-
-
-# Plots ________________________________________________________
-
-
-def SampleTrajectories():
-    # Variables for the plots:
-    ratioAveragePayoff  = []
-    ratioTargetReached  = []
-    ratioAverageContribution   = []
-
-    game = Collective_Risk_Dilemma()  # Initialize one simulation
-    for g in range(GEN):
-        print("Playing Generation", g)
-        timesReachedTarget = 0
-        for i in range(G):
-            timesReachedTarget += game.Play()  # Play one game (R rounds)
-
-        ratioAveragePayoff.append(game.GetAveragePayoff() / (2*R))  # 2R is the max payoff, so this lies in [0, 1]
-        ratioTargetReached.append(timesReachedTarget / G)
-        ratioAverageContribution.append(game.GetCommonPool() / (2*N))
-
-        game.UpdateFitness()
-        game.UpdatePopulation()  # Create a new population based on fitness
-
-    plt.plot(range(len(ratioAveragePayoff)), ratioAveragePayoff, label="Payoff")
-    plt.plot(range(len(ratioTargetReached)), ratioTargetReached, label="Target Reached")
-    plt.plot(range(len(ratioAverageContribution)), ratioAverageContribution, label="Contribution")
-    plt.xlabel('Generation')
-    plt.ylabel('Proportion')
-    plt.title('Sample trajectories for the evolutionary dynamics in collective-risk dilemmas')
-    plt.legend()
-    plt.show()
-
-
-def SummaryEvolutionaryDynamics1():
-
-    risks = [i / 20 for i in range(21)]  # 0.0, 0.05, ..., 1.0
-    payoffRisk = []
-    contributionRisk = []
-    targetRisk = []
-    firstHalf = []
-    secondHalf = []
-
-    for k in risks:
-        print("PRISK", k)
-        ratioPayoff = []
-        ratioTarget = []
-        ratioContribution = []
-        game = Collective_Risk_Dilemma(prisk=k)
-        for g in range(GEN):
-            print("Playing Generation", g)
-            timesReachedTarget = 0
-            for i in range(G):
-                timesReachedTarget += game.Play()  # Play one game (R rounds)
-
-            ratioPayoff.append(game.GetAveragePayoff() / (2*R))  # 2R is the max payoff
-            ratioTarget.append(timesReachedTarget / G)
-            ratioContribution.append(game.GetCommonPool() / (2*N))
-
-            game.UpdateFitness()
-            game.UpdatePopulation()  # Create a new population based on fitness
-
-        payoffRisk.append(np.mean(ratioPayoff))
-        contributionRisk.append(np.mean(ratioContribution))
-        targetRisk.append(np.mean(ratioTarget))
-        firstHalf.append(np.mean(ratioContribution[:len(ratioContribution)//2]))
-        secondHalf.append(np.mean(ratioContribution[len(ratioContribution)//2:]))
-
-    plt.plot(risks, payoffRisk, label="Payoff")
-    plt.plot(risks, contributionRisk, label="Contribution")
-    plt.plot(risks, targetRisk, label="Target")
-    plt.plot(risks, firstHalf, label="1st Half")
-    plt.plot(risks, secondHalf, label="2nd Half")
-    plt.xlabel('Risk Probability')
-    plt.ylabel('Proportion')
-    plt.title('Summary of the evolutionary dynamics in collective-risk dilemmas')
-    plt.legend()
-    plt.show()
-
-
-def SummaryEvolutionaryDynamics2():
-
-    risks = [i / 20 for i in range(21)]  # 0.0, 0.05, ..., 1.0
-    cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR = [],[],[],[]
-
-    for k in risks:
-        print("PRISK", k)
-        game = Collective_Risk_Dilemma(prisk=k)
-        for g in range(GEN):
-            print("Playing Generation", g)
-            for i in range(G):
-                game.Play()  # Play one game (R rounds)
-
-            game.UpdateFitness()
-            game.UpdatePopulation()  # Create a new population based on fitness
-
-        # cContributions = [cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR]
-        contribution = game.GetcContributions()
-        totalContributions = sum(contribution)
-
-        cEqualsZero.append(contribution[0]/totalContributions)
-        cEqualsR.append(contribution[1]/totalContributions)
-        cBiggerThanR.append(contribution[2]/totalContributions)
-        cSmallerThanR.append(contribution[3]/totalContributions)
-        
-    plt.plot(risks, cEqualsZero, label="C = 0")
-    plt.plot(risks, cEqualsR, label="C = R")
-    plt.plot(risks, cBiggerThanR, label="C > R")
-    plt.plot(risks, cSmallerThanR, label="C < R")
-    plt.xlabel('Risk Probability')
-    plt.ylabel('Frequency of the Behaviour')
-    plt.title('Summary of the evolutionary dynamics in collective-risk dilemmas')
-    plt.legend()
-    plt.show()
-
-
-# ______________________________________________________________
-
-def main():
-    # SampleTrajectories()
-    # SummaryEvolutionaryDynamics1()
-    SummaryEvolutionaryDynamics2()
-
-
-
-if __name__ == '__main__':
-    main()
-
-
diff --git a/Stability_of_behaviors.py b/Stability_of_behaviors.py
deleted file mode 100755
index 0ee02ca4b553f6269eea902a8728402b8da38e27..0000000000000000000000000000000000000000
--- a/Stability_of_behaviors.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# Imports ______________________________________________________
-
-import matplotlib.pyplot as plt
-import numpy as np
-import math
-
-# Constants ____________________________________________________
-
-N = 100  # Size of the population
-M = 6  # Number of individuals sampled from the population for each game
-R = 10  # Total number of rounds in one game
-T = M * R  # Target sum for the common pool
-GEN = 200  # Number of generations
-G = 1000  # Number of games per generation
-PRISK = 0.9  # Probability of losing the remaining endowment if the target is missed
-BETA = 1  # Intensity of selection (used in the fitness function)
-MU = 0.03  # Error (mutation) probability
-SIGMA = 0.15  # Standard deviation for Gaussian noise on thresholds
-
-# Code _________________________________________________________
-
-class Collective_Risk_Dilemma:
-    def __init__(self, prisk=PRISK, sigma=SIGMA):
-        self.prisk = prisk
-        self.sigma = sigma
-
-        self.payoffs = [[] for i in range(N)]
-        self.fitness = [0] * N
-        self.commonPool = 0
-        self.moneyGiven = [[] for i in range(M)]
-        self.cContributions = [0]*4 #[cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR]
-        # Set players' strategies: each player contributes a fixed amount
-        # (0, 1 or 2) per round, one value for the first half of the rounds
-        # and another for the second half.
-        self.contributions = []
-        for i in range(N):
-            first = np.random.choice(3)
-            second = np.random.choice(3)
-            self.contributions.append([first] * (R // 2) + [second] * (R - R // 2))
-
-
-    def ComputePayoff(self, invested):
-        """
-            Computes a player's payoff at the end of a game: the remaining
-            endowment is kept if the target was reached, and otherwise only
-            with probability 1 - prisk.
-        """
-        if self.commonPool >= T or np.random.random() < 1 - self.prisk:
-            return (2 * R) - invested
-        else:
-            return 0
-
-    def SelectPlayers(self):
-        """
-            Randomly select M distinct players from the population of size N.
-        """
-        return np.random.choice(N, M, replace=False)
-
-    def UpdatePopulation(self):
-        """
-            The next generation is selected using the Wright-Fisher process
-            where the individual’s fitness is used to weigh the probability of
-            choosing an individual for the new population.
-        """
-        total_fitness = sum(self.fitness)
-        prob = [f / total_fitness for f in self.fitness]
-        reproduction_selection = np.random.choice(N, N, True, prob)
-        temp_contributions = np.zeros((N, R))  # One contribution per round
-        for i, player in enumerate(reproduction_selection):  # Generate errors
-            for r in range(R):
-                if np.random.random() < MU:  # Error (mutation) on the contribution
-                    temp_contributions[i][r] = np.random.choice(3)
-                else:
-                    temp_contributions[i][r] = self.contributions[player][r]
-        self.contributions = np.copy(temp_contributions)
-        self.payoffs = [[] for i in range(N)]
-
-    def Play(self):
-        """
-            Plays 1 Game  (R rounds)
-        """
-        selectedPlayers = self.SelectPlayers()  # Index of the player in the whole population (M)
-        moneyPlayerOwns = [2 * R] * M  # An individual player starts each game with an initial endowment of 2R
-        c = [0] * M  # Total Investment of each player
-        self.moneyGiven = [[] for i in range(M)]
-        self.commonPool = 0
-        for r in range(R):
-            if self.commonPool < T:  # If the goal was not reached yet: contribute; else do nothing
-                for i, player in enumerate(selectedPlayers):
-                    # Contribute the round's amount, or whatever money is left.
-                    amount = min(self.contributions[player][r], moneyPlayerOwns[i])
-                    c[i] += amount
-                    self.commonPool += amount
-                    self.moneyGiven[i].append(amount)
-                    moneyPlayerOwns[i] -= amount
-
-
-        for i, player in enumerate(selectedPlayers):
-            self.payoffs[player].append(self.ComputePayoff(c[i]))
-
-        # cContributions = [cEqualsZero, cEqualsR, cBiggerThanR, cSmallerThanR]
-        for i in range(M):
-            if c[i] == 0:
-                self.cContributions[0] += 1
-            elif c[i] == R:
-                self.cContributions[1] += 1
-            elif c[i] > R:
-                self.cContributions[2] += 1
-            else:  # 0 < c[i] < R
-                self.cContributions[3] += 1
-
-
-        if self.commonPool >= T: return 1  # Target reached
-        else: return 0
-
-
-    def UpdateFitness(self):
-        """
-            Sets each player's fitness to exp(BETA * mean payoff), where
-            BETA is the intensity of selection.
-        """
-        for n in range(N):
-            # Players never selected this generation have no payoffs;
-            # treat their mean payoff as 0 (fitness 1).
-            mean_payoff = np.mean(self.payoffs[n]) if self.payoffs[n] else 0
-            self.fitness[n] = math.exp(BETA * mean_payoff)
-
-
-    def _StillStrategy(self, profile):
-        """True if at least one player still plays the given per-round profile.
-        (A plain `in` test on a NumPy array matches single elements, not rows,
-        so the check is done row-wise.)"""
-        strategies = np.asarray(self.contributions)
-        return bool(np.all(strategies == profile, axis=1).any())
-
-    def StillStrategy00(self):
-        return self._StillStrategy([0] * R)
-
-    def StillStrategy11(self):
-        return self._StillStrategy([1] * R)
-
-    def StillStrategy22(self):
-        return self._StillStrategy([2] * R)
-
-    def StillStrategy02(self):
-        return self._StillStrategy([0] * (R // 2) + [2] * (R - R // 2))
-
-    def StillStrategy20(self):
-        return self._StillStrategy([2] * (R // 2) + [0] * (R - R // 2))
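-
-    # Usage sketch (added; illustrative): after each generation one can
-    # check which pure strategy profiles are still present, e.g.
-    #
-    #     game = Collective_Risk_Dilemma(prisk=0.9)
-    #     game.Play()
-    #     game.UpdateFitness()
-    #     game.UpdatePopulation()
-    #     game.StillStrategy22()  # True while an all-2 strategy remains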
-
-
-# Plots ________________________________________________________
-
-
-def plot():
-    risks = [i / 20 for i in range(21)]  # 0.0, 0.05, ..., 1.0
-
-    cntl00, cntl11, cntl22, cntl02, cntl20 = [],[],[],[],[]
-
-
-    for k in risks:
-        flag00, flag11, flag22, flag02, flag20 = True, True, True, True, True
-        cnt00, cnt11, cnt22, cnt02, cnt20 = 0, 0, 0, 0, 0
-        print("PRISK", k)
-        game = Collective_Risk_Dilemma(prisk=k)
-        for g in range(GEN):
-            print("Playing Generation", g)
-            for i in range(G):
-                game.Play()  # Play one game (R rounds)
-
-            # Count how many consecutive generations (from the first) each
-            # pure strategy profile has survived in the population.
-            if flag00 and game.StillStrategy00(): cnt00 += 1
-            else: flag00 = False
-
-            if flag11 and game.StillStrategy11(): cnt11 += 1
-            else: flag11 = False
-
-            if flag22 and game.StillStrategy22(): cnt22 += 1
-            else: flag22 = False
-
-            if flag02 and game.StillStrategy02(): cnt02 += 1
-            else: flag02 = False
-
-            if flag20 and game.StillStrategy20(): cnt20 += 1
-            else: flag20 = False
-
-            game.UpdateFitness()
-            game.UpdatePopulation()  # Create a new population based on fitness
-
-        cntl00.append(cnt00)
-        cntl11.append(cnt11)
-        cntl22.append(cnt22)
-        cntl02.append(cnt02)
-        cntl20.append(cnt20)
-
-
-    plt.plot(risks, cntl00, label="C = 0000000000")
-    plt.plot(risks, cntl11, label="C = 1111111111")
-    plt.plot(risks, cntl22, label="C = 2222222222")
-    plt.plot(risks, cntl02, label="C = 0000022222")
-    plt.plot(risks, cntl20, label="C = 2222200000")
-
-
-    plt.xlabel('Risk Probability')
-    plt.ylabel('Generations')
-    plt.title('Stability of behaviors in a collective-risk dilemma')
-    plt.legend()
-    plt.show()
-
-
-# ______________________________________________________________
-
-def main():
-    plot()
-
-
-
-if __name__ == '__main__':
-    main()
-
-
diff --git a/game.py b/game.py
index 7a93bf88a72384ded6f99b8718d9940b6d050ad2..f01410c6bf2b8c15dad04b8b0fa69b9a55f78451 100644
--- a/game.py
+++ b/game.py
@@ -29,7 +29,7 @@ class Game:
         self.iterations = I
 
         """
-        | 2-Player Game Graph Model:
+        | 2-Player Game Graph Model: (Small-world network)
         |
         | P: Probability of rewiring each original edge in the graph
         |
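+        | (Added note: "small-world" refers to the Watts-Strogatz model,
+        | where each edge of a ring lattice is rewired with probability P.
+        | A sketch of an equivalent construction, assuming networkx is an
+        | acceptable stand-in for the project's graph module:
+        |
+        |     import networkx as nx
+        |     g = nx.watts_strogatz_graph(n=N, k=K, p=P)
+        |
+        | with K the initial number of neighbours per node.)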
@@ -115,9 +115,11 @@ class Game:
 
                     for p in [i, j]:
                         if np.random.uniform(0, 1) < risk:
-                            lossTable[p, r] += self.alpha / self.graph.getNodesNumber()[p]
+                            lossTable[p, r] += self.alpha / \
+                                               self.graph.getNodesNumber()[p]
                 for i in range(self.N):
-                    self.players[i].updateReward(r, actionTable[i][r], lossTable[i][r])
+                    self.players[i].updateReward(r, actionTable[i][r],
+                                                 lossTable[i][r])
 
             for r in range(self.R):
                 unique, count = np.unique(strategyTable[r], return_counts=True)
diff --git a/main.py b/main.py
index b2db86b1688ce22842d87c76d4bf6179be223acd..923934ef2b8330037d494ab27f103b8272a8c9fe 100644
--- a/main.py
+++ b/main.py
@@ -1,6 +1,6 @@
 """
 This file contains graph methods and t-test implementations. The main
-function should produce all Figures and t-test results as the thesis.
+function should produce all Figures and t-test results in the thesis.
 
 Author: Liyao Zhu  liyao@student.unimelb.edu.au
 Date:   Apr. 2019
@@ -340,112 +340,31 @@ def main():
     #           titleComment="0.999 - decrease", legendLoc='lower left')
 
 
+    """T-tests 1. Average contribution of different T"""
+    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.9, 1.0))    #p=0.2708
+    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.8, 1.0))    #p=0.1096
+    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.7, 1.0))    #p=0.1633
+    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.6, 1.0))    #p=0.2208
 
+    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.5, 1.0))    #p=2.2067e-08
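+    # Reading (added note): at the 0.05 level, only the (0.5, 1.0) pair
+    # differs significantly; thresholds of 0.6 and above are statistically
+    # indistinguishable from 1.0 in these runs.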
 
-    """
-    Graph1: Number of Actions of Round r (start by 0) by Iteration
-    """
-
-    # RepeatTimes = 30
-    # for N in [5, 10, 20, 50, 100]:
-    #     K = N - 1
-    #     for R in [1, 2, 4]:
-    #         for alpha in [0.2, 0.4, 0.6, 0.8, 1]:
-    #             data = rep(RepeatTimes, R, Actions, I, N=N, K=K, alpha=alpha)
-    #             for r in range(R):
-    #                 stackPlot(data, r, Actions, I, titleComment="N="+ str(N) + ", R=" + str(R) + ", alpha=" +str(alpha) + ", Well-Mixed graph")
-
-
-    # for k in [2, 99]:
-    #     for p in [0.8]:
-    #         data = rep(repeat=30, N=100, K=k, Actions=Actions, R=1, I=I, P=p)
-    #         stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment=("K=" + str(k) + ", P=" + str(p)))
-
-    """
-    Graph2: Average contribution by K, P
-    """
-    # graph_kp3d(Actions)
-
-    """
-    Graph3: Comparing a parameter (put in a list)
-    """
-    # stackBar(0, Actions, repeat=1, N=[5, 10, 20, 50, 100], threshold=0.6, RF=2)
-    # stackBar(0, Actions, repeat=30, RF=2, threshold=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
-
-    """
-    Graph4: Actions by different epsilon with multi-arm bandit algorithms
-    """
-
-
-    # stackBar(0, Actions, repeat=30, multiArm='greedy', legendLoc='lower right',
-    #          epsilon=[0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
-    #          threshold=0.3, RF=2)
-
-
-    # data = rep(repeat=30, Actions=Actions, multiArm='greedy', epsilon=0.05)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.05 - greedy",legendLoc='lower left')
-    #
-    # data = rep(repeat=30, Actions=Actions, multiArm='greedy', epsilon=0.1)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.1 - greedy",legendLoc='lower left')
-    #
-    # data = rep(repeat=30, Actions=Actions, multiArm='greedy', epsilon=0.2)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.2 - greedy",legendLoc='lower left')
-
-
-
-    # data = rep(repeat=30, Actions=Actions, multiArm='greedy', epsilon=0.6, I=5000)
-    # stackPlot(data, r=0, Iterations=5000, Actions=Actions, titleComment="0.6 - greedy", legendLoc='lower left')
-
-    # data = rep(repeat=30, Actions=Actions, multiArm='decrease', epsilon=0.9)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.9 - decrease",legendLoc='lower left')
-
-    # data = rep(repeat=30, Actions=Actions, multiArm='decrease', epsilon=0.99)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.99 - decrease",legendLoc='lower left')
-    #
-    # data = rep(repeat=30, Actions=Actions, multiArm='decrease', epsilon=0.999)
-    # stackPlot(data, r=0, Iterations=I, Actions=Actions, titleComment="0.999 - decrease",legendLoc='lower left')
-
-
-
-
-
-
-
-
-    """
-    T-Test
-    """
-
-    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.2, 0.3), byThreshold=True)   #p=3.324e-31
-    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.6, 1.0))    #pvalue=0.2208
-    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.8, 1.0))    #pvalue=0.1096
-    # t_test(30, Actions, alpha=1, RF=2, threshold=(0.5, 1.0))    #pvalue=2.2067e-08
-    # t_test(30, Actions, alpha=0.85, RF=2, threshold=(0.2, 0.3), byThreshold=True)   #pvalue=0.005865
-
-    # t_test(30, Actions, alpha=(1, 0.9), RF=2, threshold=0.2)    #pvalue=0.3748
-    # t_test(30, Actions, alpha=(1, 0.85), RF=2, threshold=0.2)   #pvalue=0.001466
-    # t_test(30, Actions, alpha=(1, 0.8), RF=2, threshold=0.2)    #pvalue=0.0002030
-    # t_test(30, Actions, alpha=(1, 0.75), RF=2, threshold=0.2)   #pvalue=3.9617e-07
-    # t_test(30, Actions, alpha=(1, 0.7), RF=2, threshold=0.2)    #pvalue=2.2428e-09
-    # t_test(30, Actions, alpha=(1, 0.65), RF=2, threshold=0.2)   #pvalue=6.8966e-09
-    # t_test(30, Actions, alpha=(1, 0.6), RF=2, threshold=0.2)    #pvalue=7.1621e-15
-    # t_test(30, Actions, alpha=(1, 0.5), RF=2, threshold=0.2)    #pvalue=4.1760e-13
-    # t_test(30, Actions, alpha=(1, 0.45), RF=2, threshold=0.2)   #pvalue=1.3749e-11
-    # t_test(30, Actions, alpha=(1, 0.4), RF=2, threshold=0.2)    #pvalue=3.8352e-19
+    """T-test 2. Average contribution of different alpha when T=0.2"""
 
     # base = repHist(30, Actions, alpha=1, RF=2, threshold=0.2)
     # for alpha in np.arange(0.8, 1, 0.01):
     #     compare = repHist(30, Actions, alpha=alpha, RF=2, threshold=0.2)
     #     print("Alpha=", alpha, stats.ttest_ind(base, compare))
 
-    """Epsilon-decrease 0.99 with 0.1 and 0.999"""
+
+
+    """T-test 3. Avg contribution of Epsilon-decrease 0.99 with 0.1 and 0.999"""
 
     # base = repHist(30, Actions, multiArm='decrease', epsilon=0.99)
     # for epsilon in (0.1, 0.999):
     #     compare = repHist(30, Actions, multiArm='decrease', epsilon=epsilon)
     #     print("Epsilon=", epsilon, stats.ttest_ind(base, compare))
 
-    """T-TEST for 0.999-decrease 5000 iterations with 0.9"""
+    """T-test 4. Avg contribution of 0.999-decrease 5000 iterations with 0.9"""
     # base = repHist(30, Actions, multiArm='decrease', epsilon=0.9)
     # compare = repHist(30, Actions, multiArm='decrease', epsilon=0.999, I=5000)
     # print(stats.ttest_ind(base, compare))
@@ -453,31 +372,5 @@ def main():
 
 
 
-
-
-
-    """T-TEST K,P"""
-
-    # t_test(30, Actions, K=(2, 99), P=0)    #pvalue=0.4278
-    # t_test(30, Actions, K=(2, 99), P=0.9)  #pvalue=0.4541
-    # for _ in range(5):
-    #     t_test(30, Actions, K=(2, 99), P=0.8) #pvalue=0.01502  ***
-    # t_test(30, Actions, K=(2, 99), P=0.85) #pvalue=0.1931
-    # t_test(30, Actions, K=(2, 99), P=0.75) #pvalue=0.5630
-    # t_test(30, Actions, K=2, P=(0, 0.9))   #pvalue=0.9806
-    # t_test(30, Actions, K=2, P=(0, 0.8))   #pvalue=0.4523
-    # t_test(30, Actions, K=(2, 99), P=0.9)  #pvalue=0.4541
-    # t_test(30, Actions, K=(2, 99), P=0.7)  #pvalue=0.3698
-    # t_test(30, Actions, K=99, P=(0, 0.8))  #pvalue=0.8167
-
-
-
-    # base = repHist(30, Actions, K=99)
-    # for p in np.arange(0.76, 0.85, 0.01):         #ALL >.05
-    #     compare = repHist(30, Actions, K=2, P=p)
-    #     print("K=2, P=", p, stats.ttest_ind(base, compare))
-
-
-
 if __name__ == '__main__':
     main()
\ No newline at end of file