diff --git a/game.py b/game.py
index bdf08de0f3095da5030fecd9bafc0b00c1aced7c..148022a7fc25dd799580eeebd80e1fd4c2617096 100644
--- a/game.py
+++ b/game.py
@@ -1 +1,761 @@
-test file
\ No newline at end of file
+# Author: Mingda Ma
+# Date: 2019.05.01
+import matplotlib.pyplot as plt
+import numpy as np
+import math
+# import utilis
+from itertools import combinations
+from collections import Counter
+from matplotlib import cm
+from mpl_toolkits.mplot3d import Axes3D
+from scipy import stats
+
+
+class climategame():
+    def __init__(self):
+        self.N = 60 # population
+        self.M = 2  # Randomly choose M players to play the game (normally 2)
+        self.RF = 0  # index of the risk function chosen for the game (currently unused)
+        self.risk = 0.1  # probability that everything is lost when the target is missed
+        self.R = 6  # Rounds of a game
+        self.T = 2 * self.R  # public target
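+        # reaching T requires an average joint contribution of 2 per round (1 per player per round)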
+        self.sanction = 1  # penalty factor: a lagging player's cost is multiplied by sanction ** power
+        self.Generation = 80  # number of generations per simulation run
+        self.payoffs = [0 for i in range(self.N)]
+        self.fitness = [0] * self.N
+        self.commonPool = 0
+
+        # Set player strategies:
+        # contributions[a][b][k] is the action of player a in round b for expected-cost
+        # bucket k; actions are 0 (Defector), 1 (Fair Sharer), 2 (Altruist)
+        self.contributions = np.random.choice(3, size=(self.N, self.R, 4))
+        # In round 1 there is no history, so each player uses one random action
+        # (broadcast over all four expected-cost buckets)
+        for l in range(len(self.contributions)):
+            self.contributions[l][0] = np.random.choice([0, 1, 2])
+        # print("strategy: ", self.contributions)
+
+        # correspondingStrategy[i] is the 1-based index of the initial-generation
+        # strategy that individual i currently carries
+        self.correspondingStrategy = list(range(1, self.N + 1))
+        # Snapshot of the first generation's strategies (used by the Moran and Fermi updates)
+        self.initStrategy = self.contributions.copy()
+
+        self.RecordContributions = np.array([0] * self.R)  # contribution collected in each round, summed over all games
+
+
+    def balanceStrategy(self):
+        """
+        make sure some strategy like totally selfish, 20% contribution are in the population
+        invoked by  function: Risk_Punishment_Relationship()
+        """
+        contri = [[0,0,0,0] for i in range(self.R)]
+        for i in range(self.R):
+            a = 0
+            while a < i:
+                contri[a] = [1,1,1,1]
+                a += 1
+            self.contributions[i] = contri
+
+
+    def WFupdatePopulation(self):
+        """
+            Wright-Fisher update: resample the whole next generation
+            with probability proportional to fitness
+        """
+        # print("!!!!!!!!!!!!!!!! start WF selection")
+        # print("before",self.correspondingStrategy)
+        w = 0.8
+        # for a in range(len(self.payoffs)):
+        #     if self.payoffs[a] != 0:
+        #         self.fitness[a] = math.log(self.payoffs[a])
+        #     else: 
+        #         self.fitness[a] = 0
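+        # Fitness is linear in the accumulated payoff; w is the selection intensity
+        # (w = 0 would reduce selection to neutral drift)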
+        for a in range(len(self.payoffs)):
+            self.fitness[a] = 1 - w + w* self.payoffs[a]
+        # print("fitness:", self.fitness)
+        total = sum(self.fitness)
+        if total == 0:
+            new_generation = list(range(1, self.N + 1))
+        else:
+            prob = [f / total for f in self.fitness]
+            new_generation = np.random.choice(list(range(1, self.N + 1)), self.N, True, prob)
+        
+        temp = self.correspondingStrategy.copy()
+        for i in range(len(new_generation)):
+            self.correspondingStrategy[i] = temp[new_generation[i]-1]
+        # print("new_generation: ", new_generation)
+        # print("correspondingStrategy: ", self.correspondingStrategy)        
+        # print("dominant *************", Counter(new_generation).most_common())
+
+        new_contributions = self.contributions.copy()
+        for i in range(len(new_contributions)):
+            new_contributions[i] = self.contributions[(new_generation[i]-1)]
+        self.contributions = new_contributions
+        # print("new_contributions: ", self.contributions)
+
+    def MoranUpdate(self):
+        """
+            Moran update function
+        """
+        # print("!!!!!!!!!!!!!!!! start Moran selection")
+        Pmutation = 0.1
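+        # Fitness is the log of the accumulated payoff; a zero payoff maps to zero fitness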
+        for a in range(len(self.payoffs)):
+            if self.payoffs[a] != 0:
+                self.fitness[a] = math.log(self.payoffs[a])
+            else: 
+                self.fitness[a] = 0      
+        # new generation
+        new_generation = self.correspondingStrategy.copy()
+        
+        r1 = np.random.randint(0, self.N)
+        r2 = np.random.randint(0, self.N)
+        if np.random.random() < Pmutation:
+            # mutation: the two chosen individuals switch to a uniformly random initial
+            # strategy (1-based indices, matching correspondingStrategy)
+            new_generation[r1] = np.random.randint(1, self.N + 1)
+            new_generation[r2] = np.random.randint(1, self.N + 1)
+        else:
+            # imitation: the less fit of the pair copies the fitter one
+            rlarge = r1 if self.fitness[r1] > self.fitness[r2] else r2
+            rsmall = r2 if self.fitness[r1] > self.fitness[r2] else r1
+            new_generation[rsmall] = new_generation[rlarge]
+
+        self.correspondingStrategy = new_generation
+        # temp = self.correspondingStrategy.copy()
+        # for i in range(len(new_generation)):
+        #     self.correspondingStrategy[i] = temp[new_generation[i]-1]
+        # print("new_generation: ", new_generation)
+        # print("correspondingStrategy: ", self.correspondingStrategy)        
+        # print("dominant *************", Counter(new_generation).most_common())
+        new_contributions = self.contributions.copy()
+        for i in range(len(new_contributions)):
+            new_contributions[i] = self.initStrategy[(new_generation[i]-1)]
+        self.contributions = new_contributions
+        # print("new_contributions: ", self.contributions)
+
+    
+    def FermiUpdate(self, b):
+        """
+            Fermi (pairwise comparison) update function
+        """
+        beta = b  # selection intensity
+        for a in range(len(self.payoffs)):
+            if self.payoffs[a] != 0:
+                self.fitness[a] = math.log(self.payoffs[a])
+            else: 
+                self.fitness[a] = 0    
+
+        new_generation = self.correspondingStrategy.copy()
+        for i in range(self.N):
+            neighbour = np.random.randint(0,self.N)
+            # print(i, "'s neighbour is :",neighbour)
+            # probability that i keeps its own strategy; with probability 1 - P it imitates the neighbour
+            P_from_i_to_neighbour = 1/(1+math.exp(-beta*(self.fitness[i]-self.fitness[neighbour])))
+            birth_or_death = np.random.random()
+            if birth_or_death > P_from_i_to_neighbour:  # i adopts the neighbour's strategy
+                new_generation[i] = self.correspondingStrategy[neighbour]
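+            # e.g. with beta = 1 and a fitness advantage of 2 for i, P ≈ 0.88,
+            # so i imitates the neighbour with probability ≈ 0.12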
+        self.correspondingStrategy = new_generation
+        #print("new_generation: ", new_generation)
+        new_contributions = self.contributions.copy()
+        for i in range(len(new_contributions)):
+            new_contributions[i] = self.initStrategy[(new_generation[i]-1)]
+        self.contributions = new_contributions
+        
+    
+   
+
+    def DominantStrategy(self):
+        """
+        Print the strategy that is currently most common in the population
+        """
+        top = Counter(self.correspondingStrategy).most_common(1)
+        print("dominant Strategy: ", self.initStrategy[top[0][0]-1])
+
+
+
+    def selectPlayers(self):
+        """
+            Enumerate all pairings: every combination of M players (M = 2 by default)
+        """
+        return list(combinations(list(range(1, self.N + 1)), self.M))
+
+    def showPayOffs(self):
+        print("the final payoff for all")
+        for index in range(0,len(self.payoffs)):
+            print("player:",index+1,"  payoffs: ",self.payoffs[index])
+
+    def play(self, playerlist):
+        """
+        Play one full game (R rounds) between a pair of players
+        """
+        c = [0] * self.M  # total amount each player has invested so far
+        moneyPlayerOwns = [2 * self.R] * self.M  # initial endowment
+        self.commonPool = 0
+
+        for i, playNo in list(enumerate(playerlist)):  # 1st round
+            choice = self.contributions[playNo-1][0][0]
+            self.RecordContributions[0] += choice
+            realcost = choice
+            c[i] += choice
+            self.commonPool += choice
+            moneyPlayerOwns[i] -= realcost
+            # print("player: ", playNo, " on round ", 1, " contribute: ", choice)
+
+        for r in range(2, self.R+1):
+            commonP = self.commonPool
+            for i,playNo in list(enumerate(playerlist)):
+                # penalty exponent: how far player i lags behind a fair contribution of 1 per round
+                power = (r - 1) - c[i] if (r - 1) - c[i] > 0 else 0
+                # estimated per-round cost still needed to reach the target, scaled by the sanction
+                expectCost = (((self.T - commonP) - (commonP - c[i]) / (
+                        r - 1) * (self.R - r + 1)) / (self.R - r + 1)) * self.sanction ** power
+                if expectCost < 0:
+                    interval = 3  # no further contribution appears necessary
+                else:
+                    interval = int(expectCost) if int(expectCost) < 3 else 2  # bucket 0, 1 or 2
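+                # Worked example (illustrative): with T = 12, r = 2, commonP = 2, c[i] = 1 and
+                # sanction = 1, expectCost = ((12 - 2) - (2 - 1) / 1 * 5) / 5 = 1, so bucket 1 is used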
+
+                choice = self.contributions[(playNo-1)][r-1][interval]
+                realcost = self.sanction ** power * choice
+                if moneyPlayerOwns[i] < realcost:
+                    # not enough money left: contribute nothing this round
+                    # (the accumulated cost c[i] is kept so the payoff stays consistent)
+                    choice = 0
+                else:
+                    c[i] += realcost
+                    moneyPlayerOwns[i] -= realcost
+                # print("player: ", playNo, " on round ", r, " contribute: ", choice)
+                self.RecordContributions[r-1] += choice
+                self.commonPool += choice
+                
+        # compute payoff
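+        # If the target is reached, or the collective risk does not strike (probability 1 - risk),
+        # each player keeps the unspent endowment 2R - c[i]; otherwise both players get nothing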
+        if self.T <= self.commonPool or np.random.random() > self.risk:
+            for i,playNo in list(enumerate(playerlist)):
+                self.payoffs[playNo-1] += 2*self.R - c[i]
+                # print("Player:", playNo," payoffs:",  self.payoffs[playNo-1])
+        else:
+            for i,playNo in list(enumerate(playerlist)):
+                self.payoffs[playNo-1] += 0
+                # print("Player:", playNo," payoffs:",  self.payoffs[playNo-1])
+        
+
+        # for i,playNo in list(enumerate(playerlist)):
+        #     print("Player:", playNo," remaining money:",  moneyPlayerOwns[i])
+
+        # print("commo nPool: ", self.commonPool)
+        if self.T <= self.commonPool:
+            return 1
+        else:
+            return 0
+
+    def oneGeneration(self,playerList,methodOrder,beta):
+        """
+        Play every pairing once, apply the chosen update rule
+        (0 = Wright-Fisher, 1 = Moran, 2 = Fermi) and return the number of
+        pairings that reached the target
+        """
+        Cooperation = 0
+        for playerCouple in playerList:
+            # print("playercouple", playerCouple)
+            result = self.play(playerCouple)
+            Cooperation += result
+            # print("result: ", result)
+
+        # self.showPayOffs()
+        if (methodOrder == 0):
+            self.WFupdatePopulation()
+        if (methodOrder == 1):
+            self.MoranUpdate()
+        if (methodOrder == 2):
+            self.FermiUpdate(beta)
+
+        self.payoffs = [0 for i in range(self.N)]
+        self.fitness = [0] * self.N
+        self.commonPool = 0
+        # print("Cooperation: ", Cooperation)
+        return Cooperation
+    
+
+
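+# A minimal usage sketch added for illustration (not part of the original experiments
+# and not called from main()): run one population with the Wright-Fisher rule and
+# print the cooperation ratio per generation. The parameter values are assumptions.
+def quickDemo(generations=10):
+    game = climategame()
+    playerList = game.selectPlayers()  # all pairings of the N players
+    for g in range(generations):
+        # methodOrder 0 selects the Wright-Fisher update; beta (last argument) only matters for Fermi
+        cooperation = game.oneGeneration(playerList, 0, 1)
+        ratio = cooperation / (game.N * (game.N - 1) / 2)
+        print("generation", g, "cooperation ratio", ratio)
+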
+def Plot():
+    """
+    Plot the cooperation ratio per generation for a single run (Wright-Fisher update)
+    """
+    print("game start  !!!!!!")
+    names = {}  # one result list per experiment
+    gameInit = climategame()
+    strategySpace = gameInit.contributions
+
+    for index in range(1):   #change here to make more tests
+        # locals()['ex_'+str(index)]=[]
+        names['n' + str(index) ] = []
+        game = climategame()
+        game.contributions = strategySpace   # ensure every run uses the same strategy set
+        game.risk = 0.1
+        game.Generation = 150
+        # game.risk += index/5
+        # print("risk = ", game.risk)
+        playerList = game.selectPlayers()  # all scheduled pairings; each pair is passed into play()
+        for i in range(0,game.Generation):
+            print("Generation:" ,i)
+            # game.DominantStrategy()
+            Cooperation = game.oneGeneration(playerList,0,1)  # sequence number of update method
+            
+            ratio = Cooperation / (game.N*(game.N-1)/2)
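+            # (N*(N-1)/2 is the number of pairings, so this is the fraction that reached the target)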
+
+            names.get('n' + str(index)).append(ratio)
+        plt.plot([x for x in range(len(names.get('n' + str(index))))], names.get('n' + str(index)), label=game.risk)
+    plt.xlabel('Generation')
+    plt.ylabel('Proportion')
+    plt.title('Evolutionary dynamics in collective-risk dilemmas')
+    plt.legend()
+    plt.show()
+       
+def Compare3Update():
+    """
+        for a same population under different risk 
+        use 3 different update method and show the results
+
+        Figure 1 in report
+    """
+    print("game start  !!!!!!")
+    names = {}  # one result list per update method
+   
+    gameInit = climategame()
+    strategySpace = gameInit.contributions
+    gameInit2 = climategame()
+    strategySpace2 = gameInit2.contributions
+    gameInit3 = climategame()
+    strategySpace3 = gameInit3.contributions
+
+    for index in range(1):   # increase the range to sweep several risk values
+        # plt.subplot(5,2,index+1)
+        for method in range(3):   # for 3 different update methods
+            names['n' + str(method) ] = []
+            game1 = climategame()
+            game1.contributions = strategySpace   # use the same strategy sets
+            game1.risk += index/10
+            game2 = climategame()
+            game2.contributions = strategySpace2   
+            game2.risk += index/10
+            game3 = climategame()
+            game3.contributions = strategySpace3   
+            game3.risk += index/10
+            game4 = climategame()
+            game4.contributions = strategySpace   
+            game4.risk += index/10
+            game5 = climategame()
+            game5.contributions = strategySpace2   
+            game5.risk += index/10
+            game6 = climategame()
+            game6.contributions = strategySpace3   
+            game6.risk += index/10
+            print("risk = ", game1.risk)
+            playerList = game1.selectPlayers() 
+            for i in range(0,game1.Generation):
+                # print("Generation:" ,i)
+                # game.DominantStrategy()
+                Cooperation1 = game1.oneGeneration(playerList,method,1)
+                Cooperation2 = game2.oneGeneration(playerList,method,1)
+                Cooperation3 = game3.oneGeneration(playerList,method,1)
+                Cooperation4 = game4.oneGeneration(playerList,method,1)
+                Cooperation5 = game5.oneGeneration(playerList,method,1)
+                Cooperation6 = game6.oneGeneration(playerList,method,1)
+                # calculate the cooperation ratio of current generation
+                ratio1 = Cooperation1 / (game1.N*(game1.N-1)/2)
+                ratio2 = Cooperation2 / (game1.N*(game1.N-1)/2)
+                ratio3 = Cooperation3 / (game1.N*(game1.N-1)/2)
+                ratio4 = Cooperation4 / (game1.N*(game1.N-1)/2)
+                ratio5 = Cooperation5 / (game1.N*(game1.N-1)/2)
+                ratio6 = Cooperation6 / (game1.N*(game1.N-1)/2)
+
+                ratio = (ratio1+ratio2+ratio3+ratio4+ratio5+ratio6)/6
+                names.get('n' + str(method)).append(ratio)
+            plt.xlabel('Generation')
+            plt.ylabel('Proportion')
+            title = "Risk = " + str(game1.risk)
+            plt.title(title)   
+            if (method == 0): la = "WF"
+            if (method == 1): la = "Moran"
+            if (method == 2): la = "Fermi"
+            plt.plot([x for x in range(len(names.get('n' + str(method))))], names.get('n' + str(method)),label = la)
+        plt.legend(loc = 'best')
+        plt.show()
+
+
+    
+    
+
+def BetaOfFermiPlot():
+    """
+    change beta from 0.1 to 10 and see the result of evaluation process
+    """
+    plt.rcdefaults()
+    fig, ax = plt.subplots()
+    print("game start  !!!!!!")
+    gameInit = climategame()
+    strategySpace = gameInit.contributions
+    CooperationArray = [0,0,0,0,0,0,0]
+    std = [0,0,0,0,0,0,0]   # std
+    betalist = (0.01,0.1,0.5,1,3,5,7)
+    DominantStrategyNumeber = 0
+  
+    for beta in range(len(betalist)):
+        record = [0]*10  # payoff of the dominant strategy in each of the 10 repeated runs
+        for i in range(10):
+            WFR = []
+            game = climategame()
+            game.contributions = strategySpace
+            playerList = game.selectPlayers()
+            for index in range(0,game.Generation):
+                print("Generation:", index)
+                Cooperation = 0
+                for playerCouple in playerList:
+                    result = game.play(playerCouple)
+                    Cooperation += result
+                game.FermiUpdate(betalist[beta])  # pass the beta value, not the loop index
+                WFR.append(game.payoffs.copy())
+                DominantStrategyNumeber = Counter(game.correspondingStrategy).most_common(1)[0][0]
+                game.payoffs = [0 for i in range(game.N)]
+                game.fitness = [0] * game.N
+                game.commonPool = 0
+            record[i] = WFR[len(WFR)-1][DominantStrategyNumeber-1]/(game.N-1)
+        CooperationArray[beta] = np.mean(record)  # average over the repeated runs
+        std[beta] = np.std(record)
+    
+    
+    y_pos = np.arange(len(betalist))
+    ax.barh(y_pos, CooperationArray, xerr=std, align='center',
+        color='green', ecolor='black')
+    ax.set_yticks(y_pos)
+    ax.set_yticklabels(betalist)
+    ax.invert_yaxis()  # labels read top-to-bottom
+    ax.set_xlabel('Average payoff of the dominant strategy')
+    ax.set_title('Effect of beta in the Fermi update')
+    plt.show()
+
+
+def NumberOfDomStrategy():
+    """
+    Plot how many individuals carry the dominant strategy over the generations
+    (Fig 2 in report; with Generation set to 300 it produces Fig 3)
+    """
+    average1 = []
+    average2 = []
+    gameInit = climategame()
+    strategySpace = gameInit.contributions
+    WFplot = [0] * gameInit.Generation
+    Moranplot = [0] * gameInit.Generation
+    Fermiplot = [0] * gameInit.Generation
+
+    for index in range(30):
+        WFR = []
+        MoranR = []
+        FermiR = []
+        gameInit = climategame()
+        strategySpace = gameInit.contributions
+        for method in range(3):   # for 3 different update methods
+            game = climategame()
+            game.contributions = strategySpace   
+            game.risk = 0.9       # change the risk probability here
+            playerList = game.selectPlayers() 
+            for i in range(0,game.Generation):
+                print("Generation:" ,i)
+                if (method == 0):
+                    WFR.append(game.correspondingStrategy.copy())  
+                if (method == 1):
+                    MoranR.append(game.correspondingStrategy.copy())
+                if (method == 2):
+                    FermiR.append(game.correspondingStrategy.copy())
+                Cooperation = game.oneGeneration(playerList,method,1)
+        DominantNumber0 = Counter(WFR[len(WFR)-1]).most_common(1)
+        DominantNumber1 = Counter(MoranR[len(MoranR)-1]).most_common(1)
+        DominantNumber2 = Counter(FermiR[len(FermiR)-1]).most_common(1)
+
+        for i in range(0,len(WFR)):
+            WFplot[i] += WFR[i].count(DominantNumber0[0][0])
+            Moranplot[i] += MoranR[i].count(DominantNumber1[0][0])
+            Fermiplot[i] += FermiR[i].count(DominantNumber2[0][0])
+
+    WFplot = [i/30 for i in WFplot]
+    Moranplot = [i/30 for i in Moranplot]
+    Fermiplot = [i/30 for i in Fermiplot]
+
+    plt.plot(list(range(1,gameInit.Generation+1)), WFplot, label = "WF")
+    plt.plot(list(range(1,gameInit.Generation+1)), Moranplot,label = "Moran") 
+    plt.plot(list(range(1,gameInit.Generation+1)), Fermiplot, label = "Fermi")
+    plt.title("Risk = 0.9")
+    plt.xlabel('Generation')
+    plt.ylabel('Number of individuals with the dominant strategy')
+    plt.legend()
+    plt.show()
+
+def AveragePayOffsOf3():
+    """
+    Fig 4 in report
+    """
+    gameInit = climategame()
+    strategySpace = gameInit.contributions
+    WFplot = [0] * gameInit.Generation  # average payoff of the popular strategy in each generation
+    Moranplot = [0] * gameInit.Generation
+    Fermiplot = [0] * gameInit.Generation
+
+    for index in range(30):
+        DominantStrategyNumeber = 0
+        WFR = []
+        MoranR = []
+        FermiR = []
+        gameInit = climategame()
+        strategySpace = gameInit.contributions
+        for method in range(3):   # for 3 different update methods
+            game = climategame()
+            game.contributions = strategySpace   
+            game.risk = 0.1
+            playerList = game.selectPlayers() 
+            for i in range(0,game.Generation):
+                print("Generation:" ,i)
+                Cooperation = 0
+                for playerCouple in playerList:
+            # print("playercouple", playerCouple)
+                    result = game.play(playerCouple)
+                    # Cooperation += result
+                if (method == 0):
+                    game.WFupdatePopulation()
+                    WFR.append(game.payoffs.copy())
+                if (method == 1):
+                    game.MoranUpdate()
+                    MoranR.append(game.payoffs.copy())
+                if (method == 2):
+                    game.FermiUpdate(1)
+                    FermiR.append(game.payoffs.copy())
+                DominantStrategyNumeber = Counter(game.correspondingStrategy).most_common(1)[0][0]
+                game.payoffs = [0 for i in range(game.N)]
+                game.fitness = [0] * game.N
+                game.commonPool = 0
+            # remember the final dominant strategy separately for each update rule
+            if (method == 0): DominantWF = DominantStrategyNumeber
+            if (method == 1): DominantMoran = DominantStrategyNumeber
+            if (method == 2): DominantFermi = DominantStrategyNumeber
+
+        for i in range(0,len(WFR)):
+            WFplot[i] += WFR[i][DominantWF-1]/(game.N-1)
+            Moranplot[i] += MoranR[i][DominantMoran-1]/(game.N-1)
+            Fermiplot[i] += FermiR[i][DominantFermi-1]/(game.N-1)
+
+    WFplot = [i/30 for i in WFplot]
+    Moranplot = [i/30 for i in Moranplot]
+    Fermiplot = [i/30 for i in Fermiplot]
+
+    plt.plot(list(range(1,gameInit.Generation+1)), WFplot, label = "WF")
+    plt.plot(list(range(1,gameInit.Generation+1)), Moranplot,label = "Moran") 
+    plt.plot(list(range(1,gameInit.Generation+1)), Fermiplot, label = "Fermi")
+           
+    plt.xlabel('Generation')
+    plt.ylabel('AveragePayOffs')
+    plt.legend()
+    plt.show()
+
+def Risk_Punishment_Relationship():
+    """
+    Not be used in report
+    """
+    X = np.linspace(0.1, 1.0, 10)      # risk values used below: (risk + 1) / 10
+    Y = np.linspace(1.001, 1.010, 10)  # penalty factors used below: 1.001 + punish / 1000
+    X, Y = np.meshgrid(X, Y)
+    Z = np.zeros((10, 10))  # float array, so the averaged payoffs are not truncated to int
+
+    for index in range(1):
+        
+        DominantStrategyNumeber = 0
+        gameInit = climategame()
+        gameInit.balanceStrategy()
+        # print("let me see:",gameInit.contributions)
+        strategySpace = gameInit.contributions
+        for risk in range(10):
+            for punish in range(10):
+                print("location:", (risk,punish))
+                game = climategame()
+                game.contributions = strategySpace  
+                playerList = game.selectPlayers() 
+                game.risk = (risk+1)/10
+                game.sanction = 1.001+ punish/1000
+                # game.sanction = 1.01
+                pof = 0
+                WFR = []
+                for i in range(0,game.Generation):
+                    # print("Generation:" ,i)
+                    Cooperation = 0
+                    for playerCouple in playerList:
+                        result = game.play(playerCouple)
+                        Cooperation += result
+                    # game.FermiUpdate(1)
+                    game.WFupdatePopulation()
+                    WFR.append(game.payoffs.copy())
+                    DominantStrategyNumeber = Counter(game.correspondingStrategy).most_common(1)[0][0]
+                    game.payoffs = [0 for i in range(game.N)]
+                    game.fitness = [0] * game.N
+                    game.commonPool = 0
+                    # print("Cooperation: ", Cooperation)
+                # print("Which Strategy: ", DominantStrategyNumeber)
+                for index in range(0,len(WFR)):
+                    pof += WFR[index][DominantStrategyNumeber-1]  
+                Z[punish][risk] = pof/len(WFR)
+                print(Z[punish][risk])
+    # print(Z)
+    fig = plt.figure()
+    ax = fig.add_subplot(projection='3d')  # Axes3D(fig) no longer auto-registers in newer matplotlib
+    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.viridis)
+    ax.set_xlabel('Risk')
+    ax.set_ylabel('Penalty Factor')
+    ax.set_zlabel('Payoffs')
+    plt.show()
+    
+
+def Contribution_risk_penalty():
+    """
+    Fig 6 
+    Change the parameter in the method
+    """
+    data = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0, 'R5': 0, 'R6': 0}
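+    # the dict keys R1..R6 are only used as x-axis labels for the six rounds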
+    repeat = 50
+    gameInit = climategame()
+    values = np.zeros(gameInit.R)
+    gameInit.sanction = 1.6
+    gameInit.risk= 0.6
+        # print("let me see:",gameInit.contributions)
+    strategySpace = gameInit.contributions
+    for rep in range(repeat):       # repeated runs (risk and sanction are fixed above)
+        for punish in range(1):
+            print("repeat:", (rep, punish))
+            game = climategame()
+            playerList = game.selectPlayers() 
+            # game.sanction = 1.001+ punish/1000
+            game.risk = gameInit.risk
+            game.sanction = gameInit.sanction
+            for i in range(0,game.Generation):
+                Cooperation = 0
+                for playerCouple in playerList:
+                    result = game.play(playerCouple)
+                    Cooperation += result
+                # game.FermiUpdate(1)
+                game.WFupdatePopulation()
+                if(i == game.Generation-1):
+                     values += game.RecordContributions
+                game.RecordContributions = [0]*game.R
+                game.payoffs = [0 for i in range(game.N)]
+                game.fitness = [0] * game.N
+                game.commonPool = 0
+
+
+    values = values/(repeat*(gameInit.N*(gameInit.N-1)/2))
+    ave = values.mean()
+
+    # print(values)
+    names = list(data.keys())
+    plt.bar(names, values)
+    # plt.hlines(ave,0,names[len(names)-1], colors = 'k',linestyles='dashdot',label = str(ave))
+    plt.axhline(y=ave, xmin=0,linestyle = '-.',color='k')
+    plt.xlabel('round')
+    plt.ylabel('Contribution')
+    plt.yticks(np.arange(0,5,1))
+    title = "Contribution, risk = " + str(gameInit.risk) + ", Penalty = " + str(gameInit.sanction) 
+    plt.title(title)
+    plt.savefig("r = "+ str(gameInit.risk)+" P="+str(gameInit.sanction)+ ".png")
+    plt.show()
+
+def onlychangeRisk(): 
+    """
+    Fig 5
+    set the parameter in the init function
+    """
+    WFR = []
+    DominantStrategyNumeber = 0
+    plot = [0,0,0,0,0,0,0,0,0,0]
+
+    for time in range(15):
+        gameInit = climategame()
+        gameInit.balanceStrategy()
+        strategySpace = gameInit.contributions 
+        for risk in range(10):
+            game = climategame()
+            game.contributions = strategySpace   
+            playerList = game.selectPlayers() 
+            game.risk = (risk+1)/10
+            print(time)
+            print(game.risk)
+            pof = 0
+            for i in range(0,game.Generation):
+             # print("Generation:" ,i)
+                Cooperation = 0
+                for playerCouple in playerList:
+                    result = game.play(playerCouple)
+                    Cooperation += result
+                game.WFupdatePopulation()
+                # game.FermiUpdate(1)
+                WFR.append(game.payoffs.copy())
+                DominantStrategyNumeber = Counter(game.correspondingStrategy).most_common(1)[0][0]
+                game.payoffs = [0 for i in range(game.N)]
+                game.fitness = [0] * game.N
+                game.commonPool = 0
+            game.DominantStrategy()
+            # for index in range(0,len(WFR)):
+            #     pof +=  WFR[len(WFR)-1][DominantStrategyNumeber-1]/(game.N-1)
+            # plot[risk] = pof/len(WFR)
+            plot[risk] += WFR[len(WFR)-1][DominantStrategyNumeber-1]/(game.N-1)
+
+    plot = [a/15 for a in plot]
+    plt.plot([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1], plot, label = "test")
+           
+    plt.xlabel('risk')
+    plt.ylabel('AveragePayOffs')
+    plt.legend()
+    plt.show()
+
+
+def t_test_OnlyRisk():
+    """
+    t_test to check the payoff on risk = 0.6 and 0.9 to prove the difference
+    """
+    WFR = []
+    pointSix = []
+    pointNine = []
+    DominantStrategyNumeber1 = 0
+    DominantStrategyNumeber2 = 0
+    for i in range(30):
+        gameInit = climategame()
+        # gameInit.balanceStrategy()
+        strategySpace = gameInit.contributions 
+        game1 = climategame()
+        game1.contributions = strategySpace   
+        playerList = game1.selectPlayers() 
+        game1.risk = 0.6
+        game2 = climategame()
+        game2.contributions = strategySpace  
+        game2.risk = 0.9  # the second game uses the higher risk
+        for i in range(0,game1.Generation):
+            for playerCouple in playerList:
+                result = game1.play(playerCouple)
+            game1.WFupdatePopulation()
+            WFR.append(game1.payoffs.copy())
+            DominantStrategyNumeber1 = Counter(game1.correspondingStrategy).most_common(1)[0][0]
+            game1.payoffs = [0 for i in range(game1.N)]
+            game1.fitness = [0] * game1.N
+            game1.commonPool = 0
+        pointSix.append(WFR[len(WFR)-1][DominantStrategyNumeber1-1]/(game1.N-1))
+        for i in range(0,game2.Generation):
+            for playerCouple in playerList:
+                result = game2.play(playerCouple)
+            game2.WFupdatePopulation()
+            WFR.append(game2.payoffs.copy())
+            DominantStrategyNumeber2 = Counter(game2.correspondingStrategy).most_common(1)[0][0]
+            game2.payoffs = [0 for i in range(game2.N)]
+            game2.fitness = [0] * game2.N
+            game2.commonPool = 0
+        pointNine.append(WFR[len(WFR)-1][DominantStrategyNumeber2-1]/(game2.N-1))
+    print(pointSix)
+    print(pointNine)
+    print(stats.ttest_ind(pointSix,pointNine, equal_var = False))
+
+def main():
+    # Plot()
+    # Compare3Update()
+    # BetaOfFermiPlot()
+    # NumberOfDomStrategy()
+    AveragePayOffsOf3()
+    # Risk_Punishment_Relationship()
+    # onlychangeRisk()
+    # t_test_OnlyRisk()
+    # Contribution_risk_penalty()
+
+if __name__ == '__main__':
+    main()