deepblocker

AutoEncoderDeepBlockerFrameEncoder

Bases: DeepBlockerFrameEncoder[Tensor]

Autoencoder class for DeepBlocker Frame encoders.

Parameters:

Name                  Type                                Description                                       Default
hidden_dimensions     Tuple[int, int]                     Hidden dimensions                                 (2 * 150, 150)
num_epochs            int                                 Number of epochs if training                      50
batch_size            int                                 Batch size                                        256
learning_rate         float                               Learning rate if training                         0.001
loss_function         Optional[_Loss]                     Loss function if training                         required
optimizer             Optional[HintOrType[Optimizer]]     Optimizer if training                             required
optimizer_kwargs      OptionalKwargs                      Keyword arguments to initialize the optimizer     required
frame_encoder         HintOrType[TokenizedFrameEncoder]   Base encoder class                                None
frame_encoder_kwargs  OptionalKwargs                      Keyword arguments for initializing frame encoder  None
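
A minimal usage sketch. Here left and right stand for attribute Frames prepared elsewhere, and the private _encode is called directly only for illustration; the public entry point of the encoder base class may differ:

from klinker.encoders.deepblocker import AutoEncoderDeepBlockerFrameEncoder

encoder = AutoEncoderDeepBlockerFrameEncoder(
    hidden_dimensions=(300, 150),
    num_epochs=50,
    batch_size=256,
    learning_rate=1e-3,
)
# _encode trains the autoencoder on the concatenated token embeddings of both
# sides and returns one reduced embedding matrix per side (see source below).
left_emb, right_emb = encoder._encode(left, right)
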
Source code in klinker/encoders/deepblocker.py
class AutoEncoderDeepBlockerFrameEncoder(DeepBlockerFrameEncoder[torch.Tensor]):
    """Autoencoder class for DeepBlocker Frame encoders.

    Args:
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs if training
        batch_size: int: Batch size
        learning_rate: float: Learning rate if training
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
    """

    def __init__(
        self,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
        **kwargs
    ):
        super().__init__(
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            **kwargs
        )
        self._input_dimension = -1

    @property
    def trainer_cls(self) -> Type[DeepBlockerModelTrainer[torch.Tensor]]:
        return AutoEncoderDeepBlockerModelTrainer

    def create_features(
        self, left: Frame, right: Frame
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Features for AutoEncoder.

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.

        Returns:
            Concatenated left/right encoded, left encoded, right encoded
        """
        left_enc, right_enc = self.inner_encoder._encode_as(
            left, right, return_type="pt"
        )
        left_enc = left_enc.float()
        right_enc = right_enc.float()

        self.input_dimension = left_enc.shape[1]
        return (
            torch.concat([left_enc, right_enc]),
            left_enc,
            right_enc,
        )

create_features(left, right)

Features for AutoEncoder.

Parameters:

Name   Type   Description        Default
left   Frame  left attributes.   required
right  Frame  right attributes.  required

Returns:

Type                           Description
Tuple[Tensor, Tensor, Tensor]  Concatenated left/right encoded, left encoded, right encoded

Source code in klinker/encoders/deepblocker.py
def create_features(
    self, left: Frame, right: Frame
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Features for AutoEncoder.

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.

    Returns:
        Concatenated left/right encoded, left encoded, right encoded
    """
    left_enc, right_enc = self.inner_encoder._encode_as(
        left, right, return_type="pt"
    )
    left_enc = left_enc.float()
    right_enc = right_enc.float()

    self.input_dimension = left_enc.shape[1]
    return (
        torch.concat([left_enc, right_enc]),
        left_enc,
        right_enc,
    )
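
The returned triple drives both training and inference: the concatenated matrix is the autoencoder's training input, while the two per-side matrices are what gets compressed afterwards. A toy sketch of the shape contract, with dummy tensors standing in for real token embeddings:

import torch

left_enc = torch.rand(100, 300)   # 100 left rows, 300-dim embeddings
right_enc = torch.rand(80, 300)   # 80 right rows
features = torch.concat([left_enc, right_enc])  # autoencoder training input
assert features.shape == (180, 300)
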

CrossTupleTrainingDeepBlockerFrameEncoder

Bases: DeepBlockerFrameEncoder

CrossTupleTraining class for DeepBlocker Frame encoders.

Parameters:

Name                    Type                                Description                                       Default
hidden_dimensions       Tuple[int, int]                     Hidden dimensions                                 (2 * 150, 150)
num_epochs              int                                 Number of epochs                                  50
batch_size              int                                 Batch size                                        256
learning_rate           float                               Learning rate                                     0.001
synth_tuples_per_tuple  int                                 Synthetic tuples per tuple                        5
pos_to_neg_ratio        float                               Ratio of positive to negative tuples              1.0
max_perturbation        float                               Maximum fraction of tokens to perturb per tuple   0.4
random_seed                                                 Seed to control randomness                        None
loss_function           Optional[_Loss]                     Loss function if training                         None
optimizer               Optional[HintOrType[Optimizer]]     Optimizer if training                             None
optimizer_kwargs        OptionalKwargs                      Keyword arguments to initialize the optimizer     None
frame_encoder           HintOrType[TokenizedFrameEncoder]   Base encoder class                                None
frame_encoder_kwargs    OptionalKwargs                      Keyword arguments for initializing frame encoder  None
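
A hedged instantiation sketch; only constructor arguments shown in the source below are used, and the values are illustrative:

from klinker.encoders.deepblocker import CrossTupleTrainingDeepBlockerFrameEncoder

encoder = CrossTupleTrainingDeepBlockerFrameEncoder(
    synth_tuples_per_tuple=5,   # positives generated per input tuple
    pos_to_neg_ratio=1.0,       # negatives per tuple = 5 * 1.0
    max_perturbation=0.4,       # drop at most 40% of a tuple's tokens
    random_seed=42,
)
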
Source code in klinker/encoders/deepblocker.py
class CrossTupleTrainingDeepBlockerFrameEncoder(DeepBlockerFrameEncoder):
    """CrossTupleTraining class for DeepBlocker Frame encoders.

    Args:
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs
        batch_size: int: Batch size
        learning_rate: float: Learning rate
        synth_tuples_per_tuple: int: Synthetic tuples per tuple
        pos_to_neg_ratio: float: Ratio of positive to negative tuples
        max_perturbation: float: Maximum fraction of tokens to perturb per tuple
        random_seed: Seed to control randomness
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
    """

    def __init__(
        self,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        synth_tuples_per_tuple: int = 5,
        pos_to_neg_ratio: float = 1.0,
        max_perturbation: float = 0.4,
        random_seed=None,
        loss_function: Optional[_Loss] = None,
        optimizer: Optional[HintOrType[Optimizer]] = None,
        optimizer_kwargs: OptionalKwargs = None,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
    ):

        super().__init__(
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            loss_function=loss_function,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        self.synth_tuples_per_tuple = synth_tuples_per_tuple
        self.pos_to_neg_ratio = pos_to_neg_ratio
        self.max_perturbation = max_perturbation
        self.random_seed = random_seed

    def create_features(
        self, left: Frame, right: Frame
    ) -> Tuple[
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor
    ]:
        """Create features for cross-tuple training

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.

        Returns:
            (left_training, right_training, labels), left encoded, right encoded
        """
        if isinstance(left, KlinkerDaskFrame):
            raise NotImplementedError(
                "CrossTupleTrainingDeepBlockerFrameEncoder has not been implemented for dask yet!"
            )

        # TODO refactor this function (copy-pasted from deepblocker repo)
        list_of_tuples = pd.DataFrame(
            np.concatenate([left.values, right.values]), columns=["merged"]
        )["merged"]
        num_positives_per_tuple = self.synth_tuples_per_tuple
        num_negatives_per_tuple = int(
            self.synth_tuples_per_tuple * self.pos_to_neg_ratio
        )
        num_tuples = len(list_of_tuples)
        total_number_of_elems = int(
            num_tuples * (num_positives_per_tuple + num_negatives_per_tuple)
        )

        # We create three lists holding T, T' and L respectively.
        # For each input tuple, its num_positives_per_tuple positive pairs come first,
        # followed by its num_negatives_per_tuple negative pairs.
        left_tuple_list = ["" for _ in range(total_number_of_elems)]
        right_tuple_list = ["" for _ in range(total_number_of_elems)]
        label_list = [0 for _ in range(total_number_of_elems)]

        random.seed(self.random_seed)

        tokenizer = self.inner_encoder.tokenizer_fn
        for index in range(len(list_of_tuples)):
            tokenized_tuple = tokenizer(list_of_tuples[index])
            max_tokens_to_remove = int(len(tokenized_tuple) * self.max_perturbation)

            training_data_index = index * (
                num_positives_per_tuple + num_negatives_per_tuple
            )

            # Create num_positives_per_tuple tuple pairs with positive label
            for _ in range(num_positives_per_tuple):
                tokenized_tuple_copy = tokenized_tuple[:]

                # If the tuple has 10 words and max_perturbation is 0.5, we can
                # remove at most 5 words. We choose a random number between 0 and 5;
                # if it is 3, we randomly remove 3 words.
                num_tokens_to_remove = random.randint(0, max_tokens_to_remove)
                for _ in range(num_tokens_to_remove):
                    # randint is inclusive, so randint(0, 5) can also return 5
                    tokenized_tuple_copy.pop(
                        random.randint(0, len(tokenized_tuple_copy) - 1)
                    )

                left_tuple_list[training_data_index] = list_of_tuples[index]
                right_tuple_list[training_data_index] = " ".join(tokenized_tuple_copy)
                label_list[training_data_index] = 1
                training_data_index += 1

            for _ in range(num_negatives_per_tuple):
                left_tuple_list[training_data_index] = list_of_tuples[index]
                right_tuple_list[training_data_index] = random.choice(list_of_tuples)
                label_list[training_data_index] = 0
                training_data_index += 1

        left_train_enc, right_train_enc = self.inner_encoder._encode_as(
            pd.DataFrame(left_tuple_list),
            pd.DataFrame(right_tuple_list),
            return_type="pt",
        )
        self.input_dimension = left_train_enc.shape[1]

        left_enc, right_enc = self.inner_encoder._encode_as(
            left, right, return_type="pt"
        )
        return (
            (left_train_enc.float(), right_train_enc.float(), torch.tensor(label_list)),
            left_enc.float(),
            right_enc.float(),
        )

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        self.inner_encoder.prepare(left, right)
        (
            (left_train, right_train, label_list),
            left_enc,
            right_enc,
        ) = self.create_features(left, right)

        assert self.input_dimension is not None
        trainer = CTTDeepBlockerModelTrainer(
            input_dimension=self.input_dimension,
            hidden_dimensions=self.hidden_dimensions,
            learning_rate=self.learning_rate,
        )
        features = (left_train, right_train, label_list)  # label_list is already a tensor
        device = resolve_device()
        self.ctt_model = trainer.train(
            features=features,
            num_epochs=self.num_epochs,
            batch_size=self.batch_size,
            device=device,
        )

        return self.ctt_model.encode_side(left_enc, device), self.ctt_model.encode_side(
            right_enc, device
        )

create_features(left, right)

Create features for cross-tuple training

Parameters:

Name   Type   Description        Default
left   Frame  left attributes.   required
right  Frame  right attributes.  required

Returns:

Type                                                  Description
Tuple[Tuple[Tensor, Tensor, Tensor], Tensor, Tensor]  (left_training, right_training, labels), left encoded, right encoded

Source code in klinker/encoders/deepblocker.py
def create_features(
    self, left: Frame, right: Frame
) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor
]:
    """Create features for cross-tuple training

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.

    Returns:
        (left_training, right_training, labels), left encoded, right encoded
    """
    if isinstance(left, KlinkerDaskFrame):
        raise NotImplementedError(
            "CrossTupleTrainingDeepBlockerFrameEncoder has not been implemented for dask yet!"
        )

    # TODO refactor this function (copy-pasted from deepblocker repo)
    list_of_tuples = pd.DataFrame(
        np.concatenate([left.values, right.values]), columns=["merged"]
    )["merged"]
    num_positives_per_tuple = self.synth_tuples_per_tuple
    num_negatives_per_tuple = int(
        self.synth_tuples_per_tuple * self.pos_to_neg_ratio
    )
    num_tuples = len(list_of_tuples)
    total_number_of_elems = int(
        num_tuples * (num_positives_per_tuple + num_negatives_per_tuple)
    )

    # We create three lists holding T, T' and L respectively.
    # For each input tuple, its num_positives_per_tuple positive pairs come first,
    # followed by its num_negatives_per_tuple negative pairs.
    left_tuple_list = ["" for _ in range(total_number_of_elems)]
    right_tuple_list = ["" for _ in range(total_number_of_elems)]
    label_list = [0 for _ in range(total_number_of_elems)]

    random.seed(self.random_seed)

    tokenizer = self.inner_encoder.tokenizer_fn
    for index in range(len(list_of_tuples)):
        tokenized_tuple = tokenizer(list_of_tuples[index])
        max_tokens_to_remove = int(len(tokenized_tuple) * self.max_perturbation)

        training_data_index = index * (
            num_positives_per_tuple + num_negatives_per_tuple
        )

        # Create num_positives_per_tuple tuple pairs with positive label
        for _ in range(num_positives_per_tuple):
            tokenized_tuple_copy = tokenized_tuple[:]

            # If the tuple has 10 words and max_perturbation is 0.5, we can
            # remove at most 5 words. We choose a random number between 0 and 5;
            # if it is 3, we randomly remove 3 words.
            num_tokens_to_remove = random.randint(0, max_tokens_to_remove)
            for _ in range(num_tokens_to_remove):
                # randint is inclusive, so randint(0, 5) can also return 5
                tokenized_tuple_copy.pop(
                    random.randint(0, len(tokenized_tuple_copy) - 1)
                )

            left_tuple_list[training_data_index] = list_of_tuples[index]
            right_tuple_list[training_data_index] = " ".join(tokenized_tuple_copy)
            label_list[training_data_index] = 1
            training_data_index += 1

        for _ in range(num_negatives_per_tuple):
            left_tuple_list[training_data_index] = list_of_tuples[index]
            right_tuple_list[training_data_index] = random.choice(list_of_tuples)
            label_list[training_data_index] = 0
            training_data_index += 1

    left_train_enc, right_train_enc = self.inner_encoder._encode_as(
        pd.DataFrame(left_tuple_list),
        pd.DataFrame(right_tuple_list),
        return_type="pt",
    )
    self.input_dimension = left_train_enc.shape[1]

    left_enc, right_enc = self.inner_encoder._encode_as(
        left, right, return_type="pt"
    )
    return (
        (left_train_enc.float(), right_train_enc.float(), torch.tensor(label_list)),
        left_enc.float(),
        right_enc.float(),
    )
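
The self-supervision scheme is visible in the loop above: a positive pair couples a tuple with a randomly token-dropped copy of itself, and a negative pair couples it with a random other tuple. A standalone sketch of that labeling logic on hypothetical toy data, using only the standard library:

import random

tuples = ["anne smith berlin", "bob jones paris", "carol white rome"]
random.seed(42)

pairs = []  # (left, right, label)
for t in tuples:
    tokens = t.split()
    max_remove = int(len(tokens) * 0.4)  # max_perturbation = 0.4
    # positive: drop up to max_remove random tokens from a copy
    copy = tokens[:]
    for _ in range(random.randint(0, max_remove)):
        copy.pop(random.randint(0, len(copy) - 1))
    pairs.append((t, " ".join(copy), 1))
    # negative: pair with a random tuple from the pool
    pairs.append((t, random.choice(tuples), 0))
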

DeepBlockerFrameEncoder

Bases: Generic[FeatureType], TokenizedFrameEncoder

Base class for DeepBlocker Frame encoders.

Parameters:

Name                  Type                                Description                                       Default
hidden_dimensions     Tuple[int, int]                     Hidden dimensions                                 required
num_epochs            int                                 Number of epochs if training                      50
batch_size            int                                 Batch size                                        256
learning_rate         float                               Learning rate if training                         0.001
loss_function         Optional[_Loss]                     Loss function if training                         None
optimizer             Optional[HintOrType[Optimizer]]     Optimizer if training                             None
optimizer_kwargs      OptionalKwargs                      Keyword arguments to initialize the optimizer     None
frame_encoder         HintOrType[TokenizedFrameEncoder]   Base encoder class                                None
frame_encoder_kwargs  OptionalKwargs                      Keyword arguments for initializing frame encoder  None
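
Subclasses plug a training strategy into the shared _encode flow by overriding trainer_cls and create_features; create_features must also set self.input_dimension, which _encode asserts. A hedged skeleton, where MyTrainer is a hypothetical placeholder for a DeepBlockerModelTrainer subclass:

import torch

from klinker.encoders.deepblocker import DeepBlockerFrameEncoder

class MyDeepBlockerFrameEncoder(DeepBlockerFrameEncoder[torch.Tensor]):
    @property
    def trainer_cls(self):
        return MyTrainer  # placeholder: a DeepBlockerModelTrainer[torch.Tensor]

    def create_features(self, left, right):
        left_enc, right_enc = self.inner_encoder._encode_as(left, right, return_type="pt")
        self.input_dimension = left_enc.shape[1]  # required before _encode's assert
        return torch.concat([left_enc, right_enc]), left_enc, right_enc
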
Source code in klinker/encoders/deepblocker.py
class DeepBlockerFrameEncoder(Generic[FeatureType], TokenizedFrameEncoder):
    """Base class for DeepBlocker Frame encoders.

    Args:
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs if training
        batch_size: int: Batch size
        learning_rate: float: Learning rate if training
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
    """

    inner_encoder: TokenizedFrameEncoder

    def __init__(
        self,
        hidden_dimensions: Tuple[int, int],
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        loss_function: Optional[_Loss] = None,
        optimizer: Optional[HintOrType[Optimizer]] = None,
        optimizer_kwargs: OptionalKwargs = None,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
        **kwargs
    ):
        self.inner_encoder = tokenized_frame_encoder_resolver.make(
            frame_encoder, frame_encoder_kwargs
        )
        self.hidden_dimensions = hidden_dimensions
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.input_dimension: Optional[int] = None
        self.loss_function = loss_function
        self._optimizer_hint = optimizer
        self._optimizer_kwargs = optimizer_kwargs

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.inner_encoder.tokenizer_fn

    @property
    def trainer_cls(self) -> Type[DeepBlockerModelTrainer[FeatureType]]:
        raise NotImplementedError

    def create_features(
        self, left: Frame, right: Frame
    ) -> Tuple[FeatureType, torch.Tensor, torch.Tensor]:
        raise NotImplementedError

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        features, left_enc, right_enc = self.create_features(left, right)
        assert self.input_dimension is not None
        assert self.hidden_dimensions is not None
        trainer = self.trainer_cls(
            input_dimension=self.input_dimension,
            hidden_dimensions=self.hidden_dimensions,
            learning_rate=self.learning_rate,
            loss_function=self.loss_function,
            optimizer=self._optimizer_hint,
            optimizer_kwargs=self._optimizer_kwargs,
        )
        device = resolve_device()
        self.model = trainer.train(
            features,
            num_epochs=self.num_epochs,
            batch_size=self.batch_size,
            device=device,
        )
        return self.model.encode_side(left_enc, device), self.model.encode_side(
            right_enc, device
        )

HybridDeepBlockerFrameEncoder

Bases: CrossTupleTrainingDeepBlockerFrameEncoder

Hybrid DeepBlocker class.

Uses both Autoencoder and CrossTupleTraining strategy.

Parameters:

Name                    Type                                Description                                       Default
frame_encoder           HintOrType[TokenizedFrameEncoder]   Base encoder class                                None
frame_encoder_kwargs    OptionalKwargs                      Keyword arguments for initializing frame encoder  None
hidden_dimensions       Tuple[int, int]                     Hidden dimensions                                 (2 * 150, 150)
num_epochs              int                                 Number of epochs if training                      50
batch_size              int                                 Batch size                                        256
learning_rate           float                               Learning rate                                     0.001
synth_tuples_per_tuple  int                                 Synthetic tuples per tuple                        5
pos_to_neg_ratio        float                               Ratio of positive to negative tuples              1.0
max_perturbation        float                               Maximum fraction of tokens to perturb per tuple   0.4
random_seed                                                 Seed to control randomness                        None
loss_function           Optional[_Loss]                     Loss function if training                         None
optimizer               Optional[HintOrType[Optimizer]]     Optimizer if training                             None
optimizer_kwargs        OptionalKwargs                      Keyword arguments to initialize the optimizer     None
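
A hedged instantiation sketch. As the source below shows, the constructor internally builds an AutoEncoderDeepBlockerFrameEncoder and passes it as frame_encoder to the cross-tuple parent, so only the outer class needs to be created:

from klinker.encoders.deepblocker import HybridDeepBlockerFrameEncoder

encoder = HybridDeepBlockerFrameEncoder(
    hidden_dimensions=(300, 150),
    num_epochs=50,
    batch_size=256,
    random_seed=42,
)
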
Source code in klinker/encoders/deepblocker.py
class HybridDeepBlockerFrameEncoder(CrossTupleTrainingDeepBlockerFrameEncoder):
    """Hybrid DeepBlocker class.

    Uses both Autoencoder and CrossTupleTraining strategy.

    Args:
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs if training
        batch_size: int: Batch size
        learning_rate: float: Learning rate
        synth_tuples_per_tuple: int: Synthetic tuples per tuple
        pos_to_neg_ratio: float: Ratio of positive to negative tuples
        max_perturbation: float: Maximum fraction of tokens to perturb per tuple
        random_seed: Seed to control randomness
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
    """

    def __init__(
        self,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        synth_tuples_per_tuple: int = 5,
        pos_to_neg_ratio: float = 1.0,
        max_perturbation=0.4,
        random_seed=None,
        loss_function: Optional[_Loss] = None,
        optimizer: Optional[HintOrType[Optimizer]] = None,
        optimizer_kwargs: OptionalKwargs = None,
    ):
        inner_encoder = AutoEncoderDeepBlockerFrameEncoder(
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
        )
        super().__init__(
            frame_encoder=inner_encoder,
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            synth_tuples_per_tuple=synth_tuples_per_tuple,
            pos_to_neg_ratio=pos_to_neg_ratio,
            max_perturbation=max_perturbation,
            random_seed=random_seed,
            loss_function=loss_function,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )