Behavioural Cloning

Bases: Method

Behavioural Cloning method based on Pomerleau (1988).
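
A minimal usage sketch, assuming the class is importable as benchmark.methods.bc.BC (inferred from the file location) and that a Gymnasium environment is available; train_loader and eval_loader are hypothetical DataLoaders whose batches unpack into (state, action, _), matching the unpacking done in _train and _eval below:

import gymnasium as gym

from benchmark.methods.bc import BC  # import path assumed from src/benchmark/methods/bc.py

env = gym.make("CartPole-v1")
bc = BC(env, enjoy_criteria=50, verbose=True)

# train_loader / eval_loader are placeholders for DataLoaders over expert demonstrations
bc.train(n_epochs=500, train_dataset=train_loader, eval_dataset=eval_loader)
bc.save()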

Source code in src/benchmark/methods/bc.py
class BC(Method):
    """Behavioural Clonning method based on (Pomerleau, 1988)"""

    __version__ = "1.0.0"
    __author__ = "Pomerleau"
    __method_name__ = "Behavioural Cloning"

    def __init__(
        self,
        environment: Env,
        enjoy_criteria: int = 100,
        verbose: bool = False,
        config_file: str = None
    ) -> None:
        """Initialize BC method."""
        self.enjoy_criteria = enjoy_criteria
        self.verbose = verbose
        try:
            self.environment_name = environment.spec.name
        except AttributeError:
            self.environment_name = environment.spec._env_name
        self.save_path = f"./tmp/bc/{self.environment_name}/"

        if config_file is None:
            config_file = CONFIG_FILE

        self.hyperparameters = import_hyperparameters(
            config_file,
            self.environment_name,
        )

        super().__init__(
            environment,
            self.hyperparameters
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward method for the method.

        Args:
            x (torch.Tensor): input.

        Returns:
            x (torch.Tensor): logits output.
        """
        return self.policy(x)

    def save(self, path: str = None, name: str = None) -> None:
        """Save all model weights.

        Args:
            path (str): where to save the model weights. Defaults to None.
        """
        path = self.save_path if path is None else path
        if not os.path.exists(path):
            os.makedirs(path)

        name = "best_model.ckpt" if name is None else f"{name}.ckpt"

        torch.save(self.policy.state_dict(), f"{path}/{name}")

    def load(self, path: str = None, name: str = None) -> Self:
        """Load all model weights.

        Args:
            path (str): where to look for the model's weights. Defaults to None.

        Raises:
            ValueError: if the path does not exist.
        """
        path = self.save_path if path is None else path
        name = "best_model" if name is None else name

        if not os.path.exists(path):
            raise ValueError("Path does not exists.")

        self.policy.load_state_dict(
            torch.load(
                f"{path}{name}.ckpt",
                map_location=torch.device(self.device)
            )
        )

        return self

    def train(
        self,
        n_epochs: int,
        train_dataset: DataLoader,
        eval_dataset: DataLoader = None,
        always_save: bool = False
    ) -> Self:
        """Train process.

        Args:
            n_epochs (int): number of epochs to run.
            train_dataset (DataLoader): data to train.
            eval_dataset (DataLoader): data to eval. Defaults to None.

        Returns:
            method (Self): trained method.
        """
        folder = f"./benchmark_results/bc/{self.environment_name}"
        if not os.path.exists(folder):
            os.makedirs(f"{folder}/")

        board = Tensorboard(path=folder)
        board.add_hparams(self.hyperparameters)
        self.policy.to(self.device)

        best_model = -np.inf

        pbar = range(n_epochs)
        if self.verbose:
            pbar = tqdm(pbar, desc=self.__method_name__)
        for epoch in pbar:
            train_metrics = self._train(train_dataset)
            board.add_scalars("Train", epoch="train", **train_metrics)

            if eval_dataset is not None:
                eval_metrics = self._eval(eval_dataset)
                board.add_scalars("Eval", epoch="eval", **eval_metrics)
                board.step(["train", "eval"])
            else:
                board.step("train")

            if epoch % self.enjoy_criteria == 0:
                metrics = self._enjoy()
                board.add_scalars("Enjoy", epoch="enjoy", **metrics)
                board.step("enjoy")
                if best_model < metrics["aer"] or always_save:
                    best_model = metrics["aer"]
                    self.save(name=epoch if always_save else None)

        return self

    def _train(self, dataset: DataLoader) -> Metrics:
        """Train loop.

        Args:
            dataset (DataLoader): train data.
        """
        accumulated_loss = []
        accumulated_accuracy = []

        if not self.policy.training:
            self.policy.train()

        for batch in dataset:
            state, action, _ = batch
            state = state.to(self.device)
            action = action.to(self.device)

            self.optimizer_fn.zero_grad()
            predictions = self.forward(state)

            loss = self.loss_fn(predictions, action.squeeze(1).long())
            loss.backward()
            self.optimizer_fn.step()
            accumulated_loss.append(loss.item())

            accuracy: Number = None
            if self.discrete:
                accuracy = accuracy_fn(predictions, action.squeeze(1))
            else:
                accuracy = (action - predictions).pow(2).sum(1).sqrt().mean().item()
            accumulated_accuracy.append(accuracy)

        return {"loss": np.mean(accumulated_loss), "accuracy": np.mean(accumulated_accuracy)}

    def _eval(self, dataset: DataLoader) -> Metrics:
        """Evaluation loop.

        Args:
            dataset (DataLoader): data to eval.
        """
        accumulated_accuracy = []

        if self.policy.training:
            self.policy.eval()

        for batch in dataset:
            state, action, _ = batch
            state = state.to(self.device)

            with torch.no_grad():
                predictions = self.policy(state)

            accuracy: Number = None
            if self.discrete:
                accuracy = accuracy_fn(predictions, action.squeeze(1))
            else:
                accuracy = (action - predictions).pow(2).sum(1).sqrt().mean().item()
            accumulated_accuracy.append(accuracy)

        return {"accuracy": np.mean(accumulated_accuracy)}

__init__(environment, enjoy_criteria=100, verbose=False, config_file=None)

Initialize BC method.

src/benchmark/methods/bc.py
def __init__(
    self,
    environment: Env,
    enjoy_criteria: int = 100,
    verbose: bool = False,
    config_file: str = None
) -> None:
    """Initialize BC method."""
    self.enjoy_criteria = enjoy_criteria
    self.verbose = verbose
    try:
        self.environment_name = environment.spec.name
    except AttributeError:
        self.environment_name = environment.spec._env_name
    self.save_path = f"./tmp/bc/{self.environment_name}/"

    if config_file is None:
        config_file = CONFIG_FILE

    self.hyperparameters = import_hyperparameters(
        config_file,
        self.environment_name,
    )

    super().__init__(
        environment,
        self.hyperparameters
    )
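
A hedged sketch of overriding the bundled hyperparameter file; the config path below is purely illustrative, and the expected file format is whatever import_hyperparameters consumes:

import gymnasium as gym

from benchmark.methods.bc import BC  # import path assumed

bc = BC(
    environment=gym.make("CartPole-v1"),
    enjoy_criteria=50,     # run the _enjoy rollout every 50 epochs
    verbose=True,          # show a tqdm progress bar during training
    config_file="./configs/my_bc_hyperparameters.yaml",  # hypothetical path
)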

_eval(dataset)

Evaluation loop.

Parameters:
  • dataset (DataLoader) –

    data to eval.

src/benchmark/methods/bc.py
def _eval(self, dataset: DataLoader) -> Metrics:
    """Evaluation loop.

    Args:
        dataset (DataLoader): data to eval.
    """
    accumulated_accuracy = []

    if self.policy.training:
        self.policy.eval()

    for batch in dataset:
        state, action, _ = batch
        state = state.to(self.device)

        with torch.no_grad():
            predictions = self.policy(state)

        accuracy: Number = None
        if self.discrete:
            accuracy = accuracy_fn(predictions, action.squeeze(1))
        else:
            accuracy = (action - predictions).pow(2).sum(1).sqrt().mean().item()
        accumulated_accuracy.append(accuracy)

    return {"accuracy": np.mean(accumulated_accuracy)}

_train(dataset)

Train loop.

Parameters:
  • dataset (DataLoader) –

    train data.

src/benchmark/methods/bc.py
def _train(self, dataset: DataLoader) -> Metrics:
    """Train loop.

    Args:
        dataset (DataLoader): train data.
    """
    accumulated_loss = []
    accumulated_accuracy = []

    if not self.policy.training:
        self.policy.train()

    for batch in dataset:
        state, action, _ = batch
        state = state.to(self.device)
        action = action.to(self.device)

        self.optimizer_fn.zero_grad()
        predictions = self.forward(state)

        loss = self.loss_fn(predictions, action.squeeze(1).long())
        loss.backward()
        self.optimizer_fn.step()
        accumulated_loss.append(loss.item())

        accuracy: Number = None
        if self.discrete:
            accuracy = accuracy_fn(predictions, action.squeeze(1))
        else:
            accuracy = (action - predictions).pow(2).sum(1).sqrt().mean().item()
        accumulated_accuracy.append(accuracy)

    return {"loss": np.mean(accumulated_loss), "accuracy": np.mean(accumulated_accuracy)}

forward(x)

Forward pass through the policy network.

Parameters:
  • x (Tensor) –

    input.

Returns:
  • x (Tensor) –

    logits output.

src/benchmark/methods/bc.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Forward method for the method.

    Args:
        x (torch.Tensor): input.

    Returns:
        x (torch.Tensor): logits output.
    """
    return self.policy(x)
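
For illustration, forward can be called directly on a batch of observations, reusing the env and bc objects from the class-level example above; the dtype, batching, and greedy decoding here are assumptions that depend on the environment and on the underlying policy network:

import torch

# a single observation from the environment, batched
obs = torch.as_tensor(env.observation_space.sample(), dtype=torch.float32).unsqueeze(0)
logits = bc.forward(obs)          # raw policy output (e.g. action logits)
action = logits.argmax(dim=-1)    # greedy action for a discrete action space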

load(path=None, name=None)

Load all model weights.

Parameters:
  • path (str, default: None ) –

    where to look for the model's weights. Defaults to None.

Raises:
  • ValueError

    if the path does not exist.

src/benchmark/methods/bc.py
def load(self, path: str = None, name: str = None) -> Self:
    """Load all model weights.

    Args:
        path (str): where to look for the model's weights. Defaults to None.

    Raises:
        ValueError: if the path does not exist.
    """
    path = self.save_path if path is None else path
    name = "best_model" if name is None else name

    if not os.path.exists(path):
        raise ValueError("Path does not exists.")

    self.policy.load_state_dict(
        torch.load(
            f"{path}{name}.ckpt",
            map_location=torch.device(self.device)
        )
    )

    return self
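
A small restore sketch. Because load builds the file path as f"{path}{name}.ckpt", a custom path should end with a trailing slash; the paths below are illustrative:

bc = BC(env).load()  # loads best_model.ckpt from the default ./tmp/bc/<environment_name>/ directory
bc = BC(env).load(path="./checkpoints/", name="epoch_100")  # a checkpoint written via save(name="epoch_100")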

save(path=None, name=None)

Save all model weights.

Parameters:
  • path (str, default: None ) –

    where to save the model weights. Defaults to None.

src/benchmark/methods/bc.py
def save(self, path: str = None, name: str = None) -> None:
    """Save all model weights.

    Args:
        path (str): where to save the model weights. Defaults to None.
    """
    path = self.save_path if path is None else path
    if not os.path.exists(path):
        os.makedirs(path)

    name = "best_model.ckpt" if name is None else f"{name}.ckpt"

    torch.save(self.policy.state_dict(), f"{path}/{name}")
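
For example (output locations follow from the default save_path and the name handling above):

bc.save()                                        # best_model.ckpt under ./tmp/bc/<environment_name>/
bc.save(path="./checkpoints", name="epoch_100")  # ./checkpoints/epoch_100.ckpt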

train(n_epochs, train_dataset, eval_dataset=None, always_save=False)

Train process.

Parameters:
  • n_epochs (int) –

    number of epochs to run.

  • train_dataset (DataLoader) –

    data to train.

  • eval_dataset (DataLoader, default: None ) –

    data to eval. Defaults to None.

Returns:
  • method (Self) –

    trained method.

src/benchmark/methods/bc.py
def train(
    self,
    n_epochs: int,
    train_dataset: DataLoader,
    eval_dataset: DataLoader = None,
    always_save: bool = False
) -> Self:
    """Train process.

    Args:
        n_epochs (int): number of epochs to run.
        train_dataset (DataLoader): data to train.
        eval_dataset (DataLoader): data to eval. Defaults to None.

    Returns:
        method (Self): trained method.
    """
    folder = f"./benchmark_results/bc/{self.environment_name}"
    if not os.path.exists(folder):
        os.makedirs(f"{folder}/")

    board = Tensorboard(path=folder)
    board.add_hparams(self.hyperparameters)
    self.policy.to(self.device)

    best_model = -np.inf

    pbar = range(n_epochs)
    if self.verbose:
        pbar = tqdm(pbar, desc=self.__method_name__)
    for epoch in pbar:
        train_metrics = self._train(train_dataset)
        board.add_scalars("Train", epoch="train", **train_metrics)

        if eval_dataset is not None:
            eval_metrics = self._eval(eval_dataset)
            board.add_scalars("Eval", epoch="eval", **eval_metrics)
            board.step(["train", "eval"])
        else:
            board.step("train")

        if epoch % self.enjoy_criteria == 0:
            metrics = self._enjoy()
            board.add_scalars("Enjoy", epoch="enjoy", **metrics)
            board.step("enjoy")
            if best_model < metrics["aer"] or always_save:
                best_model = metrics["aer"]
                self.save(name=epoch if always_save else None)

    return self
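
A self-contained sketch of preparing expert data and training; the random tensors stand in for real expert demonstrations, and the (state, action, extra) layout matches the three-way unpacking performed in _train and _eval:

import gymnasium as gym
import torch
from torch.utils.data import DataLoader, TensorDataset

from benchmark.methods.bc import BC  # import path assumed

env = gym.make("CartPole-v1")

# hypothetical expert demonstrations: observations, integer actions, and a placeholder third field
states  = torch.randn(10_000, env.observation_space.shape[0])
actions = torch.randint(int(env.action_space.n), (10_000, 1))
extras  = torch.zeros(10_000, 1)

train_loader = DataLoader(TensorDataset(states, actions, extras), batch_size=64, shuffle=True)

bc = BC(env, enjoy_criteria=10, verbose=True)
bc.train(n_epochs=100, train_dataset=train_loader)  # best checkpoint lands under ./tmp/bc/<environment_name>/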