5 Simulation

To complete this work we present code that helps us find an optimal policy. We do so by estimating the parameters defined in Chapter 2 for the three teams considered (Chivas, América, Cruz Azul); the update applied by the code is sketched below.
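
For reference, the value-iteration code in the subsections below repeatedly applies the Bellman optimality backup. A sketch of the update in generic notation, where $r(s,a)$ and $p(s' \mid s,a)$ correspond to the rewards and transition dictionaries defined in the code and $\gamma$ is the usual discount factor:

$$
V_{k+1}(s) \;=\; \max_{a}\left\{\, r(s,a) + \gamma \sum_{s'} p(s' \mid s, a)\, V_{k}(s') \right\}
$$

The code below works with the undiscounted case $\gamma = 1$.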

5.1 Club Deportivo Guadalajara

According to data extracted from FBREF we obtain the following parameters; a quick consistency check of these estimates follows the listing:

Code
# Transition and reward parameters
# C_1
alpha_1p = 0.43
alpha_2p = 0.23
alpha_3p = 0.12
alpha_4p = 0.22
alpha_1r = 0.28
alpha_2r = 0.20
alpha_3r = 0.52

# C_2
beta_1p = 0.34
beta_2p = 0.21  
beta_3p = 0.14
beta_4p = 0.31
beta_1r = 0.20
beta_2r = 0.12
beta_3r = 0.07
beta_4r = 0.61
beta_1t = 0.01
beta_2t = 0.99

# C_3
gamma_1p = 0.05 
gamma_2p = 0.47
gamma_3p = 0.18
gamma_4p = 0.30
gamma_1r = 0.14
gamma_2r = 0.08
gamma_3r = 0.78
gamma_1t = 0.10
gamma_2t = 0.90
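
As a quick sanity check (a minimal sketch that reuses the variables above; the grouping labels are only illustrative), we can verify that each estimated transition distribution sums to 1:

Code
# Sanity check (sketch): each group of estimated probabilities should sum to 1
groups = {
    'C_1, action p': [alpha_1p, alpha_2p, alpha_3p, alpha_4p],
    'C_1, action r': [alpha_1r, alpha_2r, alpha_3r],
    'C_2, action p': [beta_1p, beta_2p, beta_3p, beta_4p],
    'C_2, action r': [beta_1r, beta_2r, beta_3r, beta_4r],
    'C_2, action t': [beta_1t, beta_2t],
    'C_3, action p': [gamma_1p, gamma_2p, gamma_3p, gamma_4p],
    'C_3, action r': [gamma_1r, gamma_2r, gamma_3r],
    'C_3, action t': [gamma_1t, gamma_2t],
}
for name, probs in groups.items():
    print(name, '->', round(sum(probs), 6))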

With these we can obtain the optimal policy. In addition, for simulation purposes we assign certain costs and rewards to each state depending on the action; these values can be modified. A short Monte Carlo rollout using the resulting policy is sketched after the output below.

Code
import numpy as np
import matplotlib.pyplot as plt
# Transition and reward parameters
# C_1
alpha_1p = 0.43
alpha_2p = 0.23
alpha_3p = 0.12
alpha_4p = 0.22
alpha_1r = 0.28
alpha_2r = 0.20
alpha_3r = 0.52

# C_2
beta_1p = 0.34
beta_2p = 0.21  
beta_3p = 0.14
beta_4p = 0.31
beta_1r = 0.20
beta_2r = 0.12
beta_3r = 0.07
beta_4r = 0.61
beta_1t = 0.01
beta_2t = 0.99

# C_3
gamma_1p = 0.05 
gamma_2p = 0.47
gamma_3p = 0.18
gamma_4p = 0.30
gamma_1r = 0.14
gamma_2r = 0.08
gamma_3r = 0.78
gamma_1t = 0.10
gamma_2t = 0.90

# States and actions
states = ['c_1','c_2','c_3','lp','np','gs']
actions = ['p','r','t']
rewards = {
    'c_1':{'p':0.1,'r':-0.2,'t':0},
    'c_2':{'p':0.1,'r':0.1,'t':0},
    'c_3':{'p':0.1,'r':0.2,'t':0},
    'lp':{'p':-0.1,'r':-0.1,'t':0},
    'np':{'p':0,'r':0,'t':-0.1},
    'gs':{'p':0,'r':0,'t':1}}
transition = {
    'c_1':{'p':{'c_1':alpha_1p,'c_2':alpha_2p,'c_3':alpha_3p,'lp':alpha_4p,'np':0,'gs':0},
           'r':{'c_1':alpha_1r,'c_2':alpha_2r,'c_3':0,'lp':alpha_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':0}},

    'c_2':{'p':{'c_1':beta_1p,'c_2':beta_2p,'c_3':beta_3p,'lp':beta_4p,'np':0,'gs':0},
           'r':{'c_1':beta_1r,'c_2':beta_2r,'c_3':beta_3r,'lp':beta_4r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':beta_1t,'gs':beta_2t}},

    'c_3':{'p':{'c_1':gamma_1p,'c_2':gamma_2p,'c_3':gamma_3p,'lp':gamma_4p,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':gamma_1r,'c_3':gamma_2r,'lp':gamma_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':gamma_1t,'gs':gamma_2t}},

    'lp':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0}},

    'np':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0}},

    'gs':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1}}}

# Value iteration function
def value_iteration():
    V = {s: 0 for s in states}
    cont = 0
    while cont < 10000:
        new_V = {}
        for s in states:
            values = []
            for a in actions:
                value = rewards[s][a]
                for s2 in states:
                    value += transition[s][a][s2] * V[s2]
                values.append(value)
            new_V[s] = max(values)

        if all(abs(V[s]-new_V[s])<0.0001 for s in states):
            return new_V
        cont = cont + 1 
        V = new_V
        return V
V = value_iteration()        

# Optimal policy
policy = {}
for s in states:
    values = []
    for a in actions:
        value = rewards[s][a]
        for s2 in states:
            value+= transition[s][a][s2] * V[s2]
        values.append(value)
    policy[s] = actions[np.argmax(values)]
print('Política Óptima')
print(policy)

# Visualization of the policy values
policy_values = np.zeros((len(states),len(actions)))
for i, s in enumerate(states):
    for j, a in enumerate(actions):
        policy_values[i, j] = rewards[s][a] + sum(transition[s][a][s2] * V[s2] for s2 in states)
plt.imshow(policy_values, cmap="Greys", aspect="auto")
plt.xticks(ticks=range(len(actions)), labels=actions)
plt.yticks(ticks=range(len(states)), labels=states)
plt.colorbar(label="Valor")
plt.title("Valores de la Política Óptima")
plt.show()
Política Óptima
{'c_1': 'p', 'c_2': 't', 'c_3': 't', 'lp': 't', 'np': 'p', 'gs': 't'}
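
As a complementary simulation exercise, the following sketch rolls out possessions under the policy just obtained and estimates the probability of ending in gs. It assumes the transition and policy dictionaries defined above are still in scope; simulate_possession is an illustrative helper, not part of the original model.

Code
import random

# Monte Carlo rollout (sketch): follow the optimal policy from a start state
# until an absorbing state ('lp', 'np' or 'gs') is reached.
absorbing = {'lp', 'np', 'gs'}

def simulate_possession(start='c_1', max_steps=100):
    s = start
    for _ in range(max_steps):
        if s in absorbing:
            break
        probs = transition[s][policy[s]]  # distribution for the prescribed action
        s = random.choices(list(probs), weights=list(probs.values()))[0]
    return s

n = 10000
outcomes = [simulate_possession() for _ in range(n)]
print('Estimated probability of reaching gs from c_1:', outcomes.count('gs') / n)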

5.2 Club América

According to data extracted from FBREF we obtain the following parameters:

Code
# Transition and reward parameters
# C_1
alpha_1p = 0.59
alpha_2p = 0.27
alpha_3p = 0.02
alpha_4p = 0.12
alpha_1r = 0.54
alpha_2r = 0.39
alpha_3r = 0.07

# C_2
beta_1p = 0.31
beta_2p = 0.45  
beta_3p = 0.12
beta_4p = 0.12
beta_1r = 0.29
beta_2r = 0.24
beta_3r = 0.16
beta_4r = 0.31
beta_1t = 0.004
beta_2t = 0.996

# C_3
gamma_1p = 0.09 
gamma_2p = 0.58
gamma_3p = 0.10
gamma_4p = 0.23
gamma_1r = 0.14
gamma_2r = 0.10
gamma_3r = 0.76
gamma_1t = 0.17
gamma_2t = 0.83

With these we can obtain the optimal policy. In addition, for simulation purposes we assign certain costs and rewards to each state depending on the action.

Code
import numpy as np
import matplotlib.pyplot as plt
# Transition and reward parameters
# C_1
alpha_1p = 0.59
alpha_2p = 0.27
alpha_3p = 0.02
alpha_4p = 0.12
alpha_1r = 0.54
alpha_2r = 0.39
alpha_3r = 0.07

# C_2
beta_1p = 0.31
beta_2p = 0.45  
beta_3p = 0.12
beta_4p = 0.12
beta_1r = 0.29
beta_2r = 0.24
beta_3r = 0.16
beta_4r = 0.31
beta_1t = 0.004
beta_2t = 0.996

# C_3
gamma_1p = 0.09 
gamma_2p = 0.58
gamma_3p = 0.10
gamma_4p = 0.23
gamma_1r = 0.14
gamma_2r = 0.10
gamma_3r = 0.76
gamma_1t = 0.17
gamma_2t = 0.83

# States and actions
states = ['c_1','c_2','c_3','lp','np','gs']
actions = ['p','r','t']
rewards = {
    'c_1':{'p':0.1,'r':-0.2,'t':0},
    'c_2':{'p':0.1,'r':0.1,'t':0},
    'c_3':{'p':0.1,'r':0.2,'t':0},
    'lp':{'p':-0.1,'r':-0.1,'t':0},
    'np':{'p':0,'r':0,'t':-0.1},
    'gs':{'p':0,'r':0,'t':1}}
transition = {
    'c_1':{'p':{'c_1':alpha_1p,'c_2':alpha_2p,'c_3':alpha_3p,'lp':alpha_4p,'np':0,'gs':0},
           'r':{'c_1':alpha_1r,'c_2':alpha_2r,'c_3':0,'lp':alpha_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':0}},

    'c_2':{'p':{'c_1':beta_1p,'c_2':beta_2p,'c_3':beta_3p,'lp':beta_4p,'np':0,'gs':0},
           'r':{'c_1':beta_1r,'c_2':beta_2r,'c_3':beta_3r,'lp':beta_4r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':beta_1t,'gs':beta_2t}},

    'c_3':{'p':{'c_1':gamma_1p,'c_2':gamma_2p,'c_3':gamma_3p,'lp':gamma_4p,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':gamma_1r,'c_3':gamma_2r,'lp':gamma_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':gamma_1t,'gs':gamma_2t}},

    'lp':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0}},

    'np':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0}},

    'gs':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1}}}

# Value iteration function
def value_iteration():
    V = {s: 0 for s in states}
    cont = 0
    while cont < 10000:
        new_V = {}
        for s in states:
            values = []
            for a in actions:
                value = rewards[s][a]
                for s2 in states:
                    value += transition[s][a][s2] * V[s2]
                values.append(value)
            new_V[s] = max(values)

        if all(abs(V[s]-new_V[s])<0.0001 for s in states):
            return new_V
        cont = cont + 1 
        V = new_V
        return V
V = value_iteration()        

# Optimal policy
policy = {}
for s in states:
    values = []
    for a in actions:
        value = rewards[s][a]
        for s2 in states:
            value+= transition[s][a][s2] * V[s2]
        values.append(value)
    policy[s] = actions[np.argmax(values)]
print('Política Óptima')
print(policy)

# Visualization of the policy values
policy_values = np.zeros((len(states),len(actions)))
for i, s in enumerate(states):
    for j, a in enumerate(actions):
        policy_values[i, j] = rewards[s][a] + sum(transition[s][a][s2] * V[s2] for s2 in states)
plt.imshow(policy_values, cmap="Greys", aspect="auto")
plt.xticks(ticks=range(len(actions)), labels=actions)
plt.yticks(ticks=range(len(states)), labels=states)
plt.colorbar(label="Valor")
plt.title("Valores de la Política Óptima")
plt.show()
Política Óptima
{'c_1': 'p', 'c_2': 't', 'c_3': 't', 'lp': 't', 'np': 'p', 'gs': 't'}

5.3 Cruz Azul

According to data extracted from FBREF we obtain the following parameters:

Code
# Transition and reward parameters
# C_1
alpha_1p = 0.63
alpha_2p = 0.28
alpha_3p = 0.01
alpha_4p = 0.08
alpha_1r = 0.52
alpha_2r = 0.29
alpha_3r = 0.19

# C_2
beta_1p = 0.41
beta_2p = 0.33  
beta_3p = 0.12
beta_4p = 0.14
beta_1r = 0.19
beta_2r = 0.28
beta_3r = 0.11
beta_4r = 0.52
beta_1t = 0.009
beta_2t = 0.991

# C_3
gamma_1p = 0.10 
gamma_2p = 0.37
gamma_3p = 0.19
gamma_4p = 0.34
gamma_1r = 0.21
gamma_2r = 0.09
gamma_3r = 0.70
gamma_1t = 0.08
gamma_2t = 0.92

With these we can obtain the optimal policy. In addition, for simulation purposes we assign certain costs and rewards to each state depending on the action.

Code
import numpy as np
import matplotlib.pyplot as plt

# Transition and reward parameters
# C_1
alpha_1p = 0.63
alpha_2p = 0.28
alpha_3p = 0.01
alpha_4p = 0.08
alpha_1r = 0.52
alpha_2r = 0.29
alpha_3r = 0.19

# C_2
beta_1p = 0.41
beta_2p = 0.33  
beta_3p = 0.12
beta_4p = 0.14
beta_1r = 0.19
beta_2r = 0.28
beta_3r = 0.11
beta_4r = 0.52
beta_1t = 0.009
beta_2t = 0.991

# C_3
gamma_1p = 0.10 
gamma_2p = 0.37
gamma_3p = 0.19
gamma_4p = 0.34
gamma_1r = 0.21
gamma_2r = 0.09
gamma_3r = 0.70
gamma_1t = 0.08
gamma_2t = 0.92

# States and actions
states = ['c_1','c_2','c_3','lp','np','gs']
actions = ['p','r','t']
rewards = {
    'c_1':{'p':0.1,'r':-0.2,'t':0},
    'c_2':{'p':0.1,'r':0.1,'t':0},
    'c_3':{'p':0.1,'r':0.2,'t':0},
    'lp':{'p':-0.1,'r':-0.1,'t':0},
    'np':{'p':0,'r':0,'t':-0.1},
    'gs':{'p':0,'r':0,'t':1}}
transition = {
    'c_1':{'p':{'c_1':alpha_1p,'c_2':alpha_2p,'c_3':alpha_3p,'lp':alpha_4p,'np':0,'gs':0},
           'r':{'c_1':alpha_1r,'c_2':alpha_2r,'c_3':0,'lp':alpha_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':0}},

    'c_2':{'p':{'c_1':beta_1p,'c_2':beta_2p,'c_3':beta_3p,'lp':beta_4p,'np':0,'gs':0},
           'r':{'c_1':beta_1r,'c_2':beta_2r,'c_3':beta_3r,'lp':beta_4r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':beta_1t,'gs':beta_2t}},

    'c_3':{'p':{'c_1':gamma_1p,'c_2':gamma_2p,'c_3':gamma_3p,'lp':gamma_4p,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':gamma_1r,'c_3':gamma_2r,'lp':gamma_3r,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':gamma_1t,'gs':gamma_2t}},

    'lp':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':1,'np':0,'gs':0}},

    'np':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':1,'gs':0}},

    'gs':{'p':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           'r':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1},
           't':{'c_1':0,'c_2':0,'c_3':0,'lp':0,'np':0,'gs':1}}}

# Value iteration function
def value_iteration():
    V = {s: 0 for s in states}
    cont = 0
    while cont < 10000:
        new_V = {}
        for s in states:
            values = []
            for a in actions:
                value = rewards[s][a]
                for s2 in states:
                    value += transition[s][a][s2] * V[s2]
                values.append(value)
            new_V[s] = max(values)

        if all(abs(V[s]-new_V[s])<0.0001 for s in states):
            return new_V
        cont = cont + 1 
        V = new_V
        return V
V = value_iteration()        

# Optimal policy
policy = {}
for s in states:
    values = []
    for a in actions:
        value = rewards[s][a]
        for s2 in states:
            value+= transition[s][a][s2] * V[s2]
        values.append(value)
    policy[s] = actions[np.argmax(values)]
print('Política Óptima')
print(policy)

# Visualization of the policy values
policy_values = np.zeros((len(states),len(actions)))
for i, s in enumerate(states):
    for j, a in enumerate(actions):
        policy_values[i, j] = rewards[s][a] + sum(transition[s][a][s2] * V[s2] for s2 in states)
plt.imshow(policy_values, cmap="Greys", aspect="auto")
plt.xticks(ticks=range(len(actions)), labels=actions)
plt.yticks(ticks=range(len(states)), labels=states)
plt.colorbar(label="Valor")
plt.title("Valores de la Política Óptima")
plt.show()
Política Óptima
{'c_1': 'p', 'c_2': 't', 'c_3': 't', 'lp': 't', 'np': 'p', 'gs': 't'}
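
Finally, since the same pipeline is run for the three clubs, the resulting optimal policies can be collected and compared side by side. A minimal sketch: the variables policy_chivas, policy_america and policy_cruz_azul are hypothetical names, assumed to hold the policy dictionary produced in Sections 5.1, 5.2 and 5.3, respectively.

Code
# Side-by-side comparison of the optimal policies (sketch).
# policy_chivas, policy_america and policy_cruz_azul are hypothetical variables
# assumed to store the 'policy' dictionary computed above for each club.
policies = {
    'Guadalajara': policy_chivas,
    'América': policy_america,
    'Cruz Azul': policy_cruz_azul,
}
for state in ['c_1', 'c_2', 'c_3', 'lp', 'np', 'gs']:
    print(state, {club: pol[state] for club, pol in policies.items()})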