
encoders

AutoEncoderDeepBlockerFrameEncoder

Bases: DeepBlockerFrameEncoder[Tensor]

Autoencoder class for DeepBlocker Frame encoders.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| hidden_dimensions | Tuple[int, int] | Hidden dimensions | (2 * 150, 150) |
| num_epochs | int | Number of epochs if training | 50 |
| batch_size | int | Batch size | 256 |
| learning_rate | float | Learning rate if training | 0.001 |
| loss_function | Optional[_Loss] | Loss function if training | required |
| optimizer | Optional[HintOrType[Optimizer]] | Optimizer if training | required |
| optimizer_kwargs | OptionalKwargs | Keyword arguments to initialize optimizer | required |
| frame_encoder | HintOrType[TokenizedFrameEncoder] | Base encoder class | None |
| frame_encoder_kwargs | OptionalKwargs | Keyword arguments for initializing frame encoder | None |
Source code in klinker/encoders/deepblocker.py (lines 111-174):
class AutoEncoderDeepBlockerFrameEncoder(DeepBlockerFrameEncoder[torch.Tensor]):
    """Autoencoder class for DeepBlocker Frame encoders.

    Args:
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs if training
        batch_size: int: Batch size
        learning_rate: float: Learning rate if training
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
    """

    def __init__(
        self,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
        **kwargs
    ):
        super().__init__(
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            **kwargs
        )
        self._input_dimension = -1

    @property
    def trainer_cls(self) -> Type[DeepBlockerModelTrainer[torch.Tensor]]:
        return AutoEncoderDeepBlockerModelTrainer

    def create_features(
        self, left: Frame, right: Frame
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Features for AutoEncoder.

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.

        Returns:
            Concatenated left/right encoded, left encoded, right encoded
        """
        left_enc, right_enc = self.inner_encoder._encode_as(
            left, right, return_type="pt"
        )
        left_enc = left_enc.float()
        right_enc = right_enc.float()

        self.input_dimension = left_enc.shape[1]
        return (
            torch.concat([left_enc, right_enc]),
            left_enc,
            right_enc,
        )

create_features(left, right)

Features for AutoEncoder.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attributes. | required |
| right | Frame | Right attributes. | required |

Returns:

| Type | Description |
| --- | --- |
| Tuple[Tensor, Tensor, Tensor] | Concatenated left/right encoded, left encoded, right encoded |

Source code in klinker/encoders/deepblocker.py (lines 151-174):
def create_features(
    self, left: Frame, right: Frame
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Features for AutoEncoder.

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.

    Returns:
        Concatenated left/right encoded, left encoded, right encoded
    """
    left_enc, right_enc = self.inner_encoder._encode_as(
        left, right, return_type="pt"
    )
    left_enc = left_enc.float()
    right_enc = right_enc.float()

    self.input_dimension = left_enc.shape[1]
    return (
        torch.concat([left_enc, right_enc]),
        left_enc,
        right_enc,
    )
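Example: a minimal usage sketch. The import path, toy data and column name are assumptions for illustration; the encoder expects single-column attribute frames whose index holds the entity ids (see FrameEncoder below).

import pandas as pd

from klinker.encoders import AutoEncoderDeepBlockerFrameEncoder  # assumed import path

# toy single-column attribute frames; the index serves as entity id
left = pd.DataFrame(
    {"values": ["john doe 42 berlin", "jane roe 37 paris"]}, index=["l1", "l2"]
)
right = pd.DataFrame(
    {"values": ["j. doe berlin", "jane roe paris"]}, index=["r1", "r2"]
)

encoder = AutoEncoderDeepBlockerFrameEncoder(num_epochs=5, batch_size=2)
# returns one NamedVector per side, indexed by the entity ids above
left_vec, right_vec = encoder.encode(left, right)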

AverageEmbeddingTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Averages embeddings of tokenized entity attribute values.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| tokenized_word_embedder | HintOrType[TokenizedWordEmbedder] | Word embedding class | None |
| tokenized_word_embedder_kwargs | OptionalKwargs | Keyword arguments for initializing word embedder | None |
Source code in klinker/encoders/pretrained.py (lines 326-360):
class AverageEmbeddingTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Averages embeddings of tokenized entity attribute values.

    Args:
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder]: Word embedding class
        tokenized_word_embedder_kwargs: OptionalKwargs: Keyword arguments for initializing word embedder
    """

    def __init__(
        self,
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder] = None,
        tokenized_word_embedder_kwargs: OptionalKwargs = None,
    ):
        self.tokenized_word_embedder = tokenized_word_embedder_resolver.make(
            tokenized_word_embedder, tokenized_word_embedder_kwargs
        )

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.tokenized_word_embedder.tokenizer_fn

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        if isinstance(left, dd.DataFrame):
            left = left.compute()
            right = right.compute()
        return (
            encode_frame(left, twe=self.tokenized_word_embedder),
            encode_frame(right, twe=self.tokenized_word_embedder),
        )
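Example: a hedged sketch of using this encoder directly and as the frame_encoder of a DeepBlocker encoder (frame_encoder accepts a HintOrType[TokenizedFrameEncoder]); the import paths are assumptions.

from klinker.encoders import (  # assumed import paths
    AutoEncoderDeepBlockerFrameEncoder,
    AverageEmbeddingTokenizedFrameEncoder,
)

# standalone: averages word embeddings per entity (word embedder resolved from defaults)
attr_encoder = AverageEmbeddingTokenizedFrameEncoder()

# as inner encoder of a DeepBlocker encoder
db_encoder = AutoEncoderDeepBlockerFrameEncoder(
    frame_encoder=AverageEmbeddingTokenizedFrameEncoder,
)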

CrossTupleTrainingDeepBlockerFrameEncoder

Bases: DeepBlockerFrameEncoder

CrossTupleTraining class for DeepBlocker Frame encoders.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| hidden_dimensions | Tuple[int, int] | Hidden dimensions | (2 * 150, 150) |
| num_epochs | int | Number of epochs | 50 |
| batch_size | int | Batch size | 256 |
| learning_rate | float | Learning rate | 0.001 |
| synth_tuples_per_tuple | int | Synthetic tuples per tuple | 5 |
| pos_to_neg_ratio | float | Ratio of positive to negative tuples | 1.0 |
| max_perturbation | float | Degree to which tuples should be corrupted | 0.4 |
| random_seed | | Seed to control randomness | None |
| loss_function | Optional[_Loss] | Loss function if training | None |
| optimizer | Optional[HintOrType[Optimizer]] | Optimizer if training | None |
| optimizer_kwargs | OptionalKwargs | Keyword arguments to initialize optimizer | None |
| frame_encoder | HintOrType[TokenizedFrameEncoder] | Base encoder class | None |
| frame_encoder_kwargs | OptionalKwargs | Keyword arguments for initializing frame encoder | None |
Source code in klinker/encoders/deepblocker.py (lines 177-351):
class CrossTupleTrainingDeepBlockerFrameEncoder(DeepBlockerFrameEncoder):
    """CrossTupleTraining class for DeepBlocker Frame encoders.

    Args:
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs
        batch_size: int: Batch size
        learning_rate: float: Learning rate
        synth_tuples_per_tuple: int: Synthetic tuples per tuple
        pos_to_neg_ratio: float: Ratio of positive to negative tuples
        max_perturbation: float: Degree to which tuples should be corrupted
        random_seed: Seed to control randomness
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
    """

    def __init__(
        self,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        synth_tuples_per_tuple: int = 5,
        pos_to_neg_ratio: float = 1.0,
        max_perturbation: float = 0.4,
        random_seed=None,
        loss_function: Optional[_Loss] = None,
        optimizer: Optional[HintOrType[Optimizer]] = None,
        optimizer_kwargs: OptionalKwargs = None,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
    ):

        super().__init__(
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            loss_function=loss_function,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        self.synth_tuples_per_tuple = synth_tuples_per_tuple
        self.pos_to_neg_ratio = pos_to_neg_ratio
        self.max_perturbation = max_perturbation
        self.random_seed = random_seed

    def create_features(
        self, left: Frame, right: Frame
    ) -> Tuple[
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor
    ]:
        """Create features for cross-tuple training

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.

        Returns:
            (left_training, right_training, labels), left encoded, right encoded
        """
        if isinstance(left, KlinkerDaskFrame):
            raise NotImplementedError(
                "CrossTupleTrainingDeepBlockerFrameEncoder has not been implemented for dask yet!"
            )

        # TODO refactor this function (copy-pasted from deepblocker repo)
        list_of_tuples = pd.DataFrame(
            np.concatenate([left.values, right.values]), columns=["merged"]
        )["merged"]
        num_positives_per_tuple = self.synth_tuples_per_tuple
        num_negatives_per_tuple = int(
            self.synth_tuples_per_tuple * self.pos_to_neg_ratio
        )
        num_tuples = len(list_of_tuples)
        total_number_of_elems = int(
            num_tuples * (num_positives_per_tuple + num_negatives_per_tuple)
        )

        # We create three lists containing T, T' and L respectively
        # We use the following format: first num_tuples * num_positives_per_tuple correspond to T
        # and the remaining correspond to T'
        left_tuple_list = ["" for _ in range(total_number_of_elems)]
        right_tuple_list = ["" for _ in range(total_number_of_elems)]
        label_list = [0 for _ in range(total_number_of_elems)]

        random.seed(self.random_seed)

        tokenizer = self.inner_encoder.tokenizer_fn
        for index in range(len(list_of_tuples)):
            tokenized_tuple = tokenizer(list_of_tuples[index])
            max_tokens_to_remove = int(len(tokenized_tuple) * self.max_perturbation)

            training_data_index = index * (
                num_positives_per_tuple + num_negatives_per_tuple
            )

            # Create num_positives_per_tuple tuple pairs with positive label
            for _ in range(num_positives_per_tuple):
                tokenized_tuple_copy = tokenized_tuple[:]

                # If the tuple has 10 words and max_tokens_to_remove is 0.5, then we can remove at most 5 words
                # we choose a random number between 0 and 5.
                # suppose it is 3. Then we randomly remove 3 words
                num_tokens_to_remove = random.randint(0, max_tokens_to_remove)
                for _ in range(num_tokens_to_remove):
                    # randint is inclusive. so randint(0, 5) can return 5 also
                    tokenized_tuple_copy.pop(
                        random.randint(0, len(tokenized_tuple_copy) - 1)
                    )

                left_tuple_list[training_data_index] = list_of_tuples[index]
                right_tuple_list[training_data_index] = " ".join(tokenized_tuple_copy)
                label_list[training_data_index] = 1
                training_data_index += 1

            for _ in range(num_negatives_per_tuple):
                left_tuple_list[training_data_index] = list_of_tuples[index]
                right_tuple_list[training_data_index] = random.choice(list_of_tuples)
                label_list[training_data_index] = 0
                training_data_index += 1

        left_train_enc, right_train_enc = self.inner_encoder._encode_as(
            pd.DataFrame(left_tuple_list),
            pd.DataFrame(right_tuple_list),
            return_type="pt",
        )
        self.input_dimension = left_train_enc.shape[1]

        left_enc, right_enc = self.inner_encoder._encode_as(
            left, right, return_type="pt"
        )
        return (
            (left_train_enc.float(), right_train_enc.float(), torch.tensor(label_list)),
            left_enc.float(),
            right_enc.float(),
        )

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        self.inner_encoder.prepare(left, right)
        (
            (left_train, right_train, label_list),
            left_enc,
            right_enc,
        ) = self.create_features(left, right)

        assert self.input_dimension is not None
        trainer = CTTDeepBlockerModelTrainer(
            input_dimension=self.input_dimension,
            hidden_dimensions=self.hidden_dimensions,
            learning_rate=self.learning_rate,
        )
        features = (left_train, right_train, torch.tensor(label_list))
        device = resolve_device()
        self.ctt_model = trainer.train(
            features=features,
            num_epochs=self.num_epochs,
            batch_size=self.batch_size,
            device=device,
        )

        return self.ctt_model.encode_side(left_enc, device), self.ctt_model.encode_side(
            right_enc, device
        )

create_features(left, right)

Create features for cross-tuple training

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attributes. | required |
| right | Frame | Right attributes. | required |

Returns:

| Type | Description |
| --- | --- |
| Tuple[Tuple[Tensor, Tensor, Tensor], Tensor, Tensor] | (left_training, right_training, labels), left encoded, right encoded |

Source code in klinker/encoders/deepblocker.py (lines 229-318):
def create_features(
    self, left: Frame, right: Frame
) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor
]:
    """Create features for cross-tuple training

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.

    Returns:
        (left_training, right_training, labels), left encoded, right encoded
    """
    if isinstance(left, KlinkerDaskFrame):
        raise NotImplementedError(
            "CrossTupleTrainingDeepBlockerFrameEncoder has not been implemented for dask yet!"
        )

    # TODO refactor this function (copy-pasted from deepblocker repo)
    list_of_tuples = pd.DataFrame(
        np.concatenate([left.values, right.values]), columns=["merged"]
    )["merged"]
    num_positives_per_tuple = self.synth_tuples_per_tuple
    num_negatives_per_tuple = int(
        self.synth_tuples_per_tuple * self.pos_to_neg_ratio
    )
    num_tuples = len(list_of_tuples)
    total_number_of_elems = int(
        num_tuples * (num_positives_per_tuple + num_negatives_per_tuple)
    )

    # We create three lists containing T, T' and L respectively
    # We use the following format: first num_tuples * num_positives_per_tuple correspond to T
    # and the remaining correspond to T'
    left_tuple_list = ["" for _ in range(total_number_of_elems)]
    right_tuple_list = ["" for _ in range(total_number_of_elems)]
    label_list = [0 for _ in range(total_number_of_elems)]

    random.seed(self.random_seed)

    tokenizer = self.inner_encoder.tokenizer_fn
    for index in range(len(list_of_tuples)):
        tokenized_tuple = tokenizer(list_of_tuples[index])
        max_tokens_to_remove = int(len(tokenized_tuple) * self.max_perturbation)

        training_data_index = index * (
            num_positives_per_tuple + num_negatives_per_tuple
        )

        # Create num_positives_per_tuple tuple pairs with positive label
        for _ in range(num_positives_per_tuple):
            tokenized_tuple_copy = tokenized_tuple[:]

            # If the tuple has 10 words and max_tokens_to_remove is 0.5, then we can remove at most 5 words
            # we choose a random number between 0 and 5.
            # suppose it is 3. Then we randomly remove 3 words
            num_tokens_to_remove = random.randint(0, max_tokens_to_remove)
            for _ in range(num_tokens_to_remove):
                # randint is inclusive. so randint(0, 5) can return 5 also
                tokenized_tuple_copy.pop(
                    random.randint(0, len(tokenized_tuple_copy) - 1)
                )

            left_tuple_list[training_data_index] = list_of_tuples[index]
            right_tuple_list[training_data_index] = " ".join(tokenized_tuple_copy)
            label_list[training_data_index] = 1
            training_data_index += 1

        for _ in range(num_negatives_per_tuple):
            left_tuple_list[training_data_index] = list_of_tuples[index]
            right_tuple_list[training_data_index] = random.choice(list_of_tuples)
            label_list[training_data_index] = 0
            training_data_index += 1

    left_train_enc, right_train_enc = self.inner_encoder._encode_as(
        pd.DataFrame(left_tuple_list),
        pd.DataFrame(right_tuple_list),
        return_type="pt",
    )
    self.input_dimension = left_train_enc.shape[1]

    left_enc, right_enc = self.inner_encoder._encode_as(
        left, right, return_type="pt"
    )
    return (
        (left_train_enc.float(), right_train_enc.float(), torch.tensor(label_list)),
        left_enc.float(),
        right_enc.float(),
    )
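To make the training-data construction above concrete, here is a standalone paraphrase of the loop in create_features (not the library code): positives pair a tuple with a randomly perturbed copy of itself, negatives pair it with a random other tuple.

import random

def synthesize_pairs(tuples, n_pos=2, n_neg=2, max_perturbation=0.4, seed=42):
    rng = random.Random(seed)
    lefts, rights, labels = [], [], []
    for value in tuples:
        tokens = value.split()
        max_remove = int(len(tokens) * max_perturbation)
        # positives: same tuple with up to max_remove tokens dropped
        for _ in range(n_pos):
            copy = tokens[:]
            for _ in range(rng.randint(0, max_remove)):
                copy.pop(rng.randint(0, len(copy) - 1))
            lefts.append(value)
            rights.append(" ".join(copy))
            labels.append(1)
        # negatives: paired with a randomly chosen tuple
        for _ in range(n_neg):
            lefts.append(value)
            rights.append(rng.choice(tuples))
            labels.append(0)
    return lefts, rights, labels

lefts, rights, labels = synthesize_pairs(["john doe berlin", "jane roe paris"])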

FrameEncoder

Base class for encoding a KlinkerFrame as embedding.

Source code in klinker/encoders/base.py (lines 19-150):
class FrameEncoder:
    """Base class for encoding a KlinkerFrame as embedding."""

    def validate(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ):
        """Check if frames only consist of one column.

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.
          left_rel: Optional[Frame]: left relation triples.
          right_rel: Optional[Frame]: right relation triples.

        Raises:
            ValueError: If left/right have more than one column.
        """
        if len(left.columns) != 1 or len(right.columns) != 1:
            raise ValueError(
                "Input DataFrames must consist of single column containing all attribute values!"
            )

    def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
        """Prepare for embedding (fill NaNs with empty string).

        Args:
          left: Frame: left attributes.
          right: Frame: right attributes.

        Returns:
            left, right
        """
        return left.fillna(""), right.fillna("")

    def _encode(
        self,
        left: Frame,
        right: Frame,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        raise NotImplementedError

    @overload
    def _encode_as(
        self,
        left: Frame,
        right: Frame,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
        return_type: Literal["np"],
    ) -> Tuple[np.ndarray, np.ndarray]:
        ...

    @overload
    def _encode_as(
        self,
        left: Frame,
        right: Frame,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
        return_type: Literal["pt"],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        ...

    def _encode_as(
        self,
        left: Frame,
        right: Frame,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
        return_type: GeneralVectorLiteral = "pt",
    ) -> Tuple[GeneralVector, GeneralVector]:
        left_enc, right_enc = self._encode(
            left=left, right=right, left_rel=left_rel, right_rel=right_rel
        )
        left_enc = cast_general_vector(left_enc, return_type=return_type)
        right_enc = cast_general_vector(right_enc, return_type=return_type)
        return left_enc, right_enc

    def encode(
        self,
        left: Frame,
        right: Frame,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
        return_type: GeneralVectorLiteral = "pt",
    ) -> Tuple[NamedVector, NamedVector]:
        """Encode dataframes into named vectors.

        Args:
          left: Frame: left attribute information.
          right: Frame: right attribute information.
          left_rel: Optional[Frame]: left relation triples.
          right_rel: Optional[Frame]: right relation triples.
          return_type: GeneralVectorLiteral:  Either `pt` or `np` to return as pytorch tensor or numpy array.

        Returns:
            Embeddings of given left/right dataset.
        """
        self.validate(left, right)
        # TODO check if series can't be used everywhere instead
        # of upgrading in prepare
        left, right = self.prepare(left, right)
        start = time.time()
        left_enc, right_enc = self._encode_as(
            left=left,
            right=right,
            left_rel=left_rel,
            right_rel=right_rel,
            return_type=return_type,
        )
        end = time.time()
        self._encoding_time = end - start
        if isinstance(left, dd.DataFrame):
            left_names = left.index.compute().tolist()
            right_names = right.index.compute().tolist()
        else:
            left_names = left.index.tolist()
            right_names = right.index.tolist()
        return NamedVector(names=left_names, vectors=left_enc), NamedVector(
            names=right_names, vectors=right_enc
        )

encode(left, right, *, left_rel=None, right_rel=None, return_type='pt')

Encode dataframes into named vectors.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attribute information. | required |
| right | Frame | Right attribute information. | required |
| left_rel | Optional[Frame] | Left relation triples. | None |
| right_rel | Optional[Frame] | Right relation triples. | None |
| return_type | GeneralVectorLiteral | Either `pt` or `np` to return as pytorch tensor or numpy array. | 'pt' |

Returns:

| Type | Description |
| --- | --- |
| Tuple[NamedVector, NamedVector] | Embeddings of given left/right dataset. |

Source code in klinker/encoders/base.py (lines 107-150):
def encode(
    self,
    left: Frame,
    right: Frame,
    *,
    left_rel: Optional[Frame] = None,
    right_rel: Optional[Frame] = None,
    return_type: GeneralVectorLiteral = "pt",
) -> Tuple[NamedVector, NamedVector]:
    """Encode dataframes into named vectors.

    Args:
      left: Frame: left attribute information.
      right: Frame: right attribute information.
      left_rel: Optional[Frame]: left relation triples.
      right_rel: Optional[Frame]: right relation triples.
      return_type: GeneralVectorLiteral:  Either `pt` or `np` to return as pytorch tensor or numpy array.

    Returns:
        Embeddings of given left/right dataset.
    """
    self.validate(left, right)
    # TODO check if series can't be used everywhere instead
    # of upgrading in prepare
    left, right = self.prepare(left, right)
    start = time.time()
    left_enc, right_enc = self._encode_as(
        left=left,
        right=right,
        left_rel=left_rel,
        right_rel=right_rel,
        return_type=return_type,
    )
    end = time.time()
    self._encoding_time = end - start
    if isinstance(left, dd.DataFrame):
        left_names = left.index.compute().tolist()
        right_names = right.index.compute().tolist()
    else:
        left_names = left.index.tolist()
        right_names = right.index.tolist()
    return NamedVector(names=left_names, vectors=left_enc), NamedVector(
        names=right_names, vectors=right_enc
    )

prepare(left, right)

Prepare for embedding (fill NaNs with empty string).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attributes. | required |
| right | Frame | Right attributes. | required |

Returns:

| Type | Description |
| --- | --- |
| Tuple[Frame, Frame] | left, right |

Source code in klinker/encoders/base.py (lines 45-55):
def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
    """Prepare for embedding (fill NaNs with empty string).

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.

    Returns:
        left, right
    """
    return left.fillna(""), right.fillna("")

validate(left, right, left_rel=None, right_rel=None)

Check if frames only consist of one column.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attributes. | required |
| right | Frame | Right attributes. | required |
| left_rel | Optional[Frame] | Left relation triples. | None |
| right_rel | Optional[Frame] | Right relation triples. | None |
Source code in klinker/encoders/base.py (lines 22-43):
def validate(
    self,
    left: Frame,
    right: Frame,
    left_rel: Optional[Frame] = None,
    right_rel: Optional[Frame] = None,
):
    """Check if frames only consist of one column.

    Args:
      left: Frame: left attributes.
      right: Frame: right attributes.
      left_rel: Optional[Frame]: left relation triples.
      right_rel: Optional[Frame]: right relation triples.

    Raises:
        ValueError: If left/right have more than one column.
    """
    if len(left.columns) != 1 or len(right.columns) != 1:
        raise ValueError(
            "Input DataFrames must consist of single column containing all attribute values!"
        )
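To add a new attribute encoder, only _encode has to be implemented; encode() then takes care of validation, NaN filling, casting to the requested vector type and wrapping the results as NamedVectors. A hypothetical minimal subclass (the one-dimensional "string length" feature and the import path are purely illustrative):

import numpy as np

from klinker.encoders.base import FrameEncoder  # assumed import path

class StringLengthFrameEncoder(FrameEncoder):
    """Hypothetical example: one-dimensional embedding per entity."""

    def _encode(self, left, right, *, left_rel=None, right_rel=None):
        # one row per entity, one feature column; encode() casts and names the result
        left_vec = (
            left.iloc[:, 0].astype(str).str.len().to_numpy(dtype="float32").reshape(-1, 1)
        )
        right_vec = (
            right.iloc[:, 0].astype(str).str.len().to_numpy(dtype="float32").reshape(-1, 1)
        )
        return left_vec, right_vec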

GCNFrameEncoder

Bases: RelationFrameEncoder

Use an untrained GCN to aggregate neighboring embeddings together with each entity's own embedding.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| depth | int | How many hops of neighbors should be incorporated | 2 |
| edge_weight | float | Weighting of non-self-loops | 1.0 |
| self_loop_weight | float | Weighting of self-loops | 2.0 |
| layer_dims | int | Dimensionality of layers if used | 300 |
| bias | bool | Whether to use bias in layers | False |
| use_weight_layers | bool | Whether to use randomly initialized layers in aggregation | True |
| aggr | str | Which aggregation to use. Can be "sum", "mean", "min" or "max" | 'sum' |
| attribute_encoder | HintOrType[TokenizedFrameEncoder] | Base encoder class | None |
| attribute_encoder_kwargs | OptionalKwargs | Keyword arguments for initializing encoder | None |
Source code in klinker/encoders/gcn.py (lines 163-233):
class GCNFrameEncoder(RelationFrameEncoder):
    """Use untrained GCN for aggregating neighboring embeddings with self.

    Args:
        depth: How many hops of neighbors should be incorporated
        edge_weight: Weighting of non-self-loops
        self_loop_weight: Weighting of self-loops
        layer_dims: Dimensionality of layers if used
        bias: Whether to use bias in layers
        use_weight_layers: Whether to use randomly initialized layers in aggregation
        aggr: Which aggregation to use. Can be :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"` or :obj:`"max"`
        attribute_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        attribute_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing encoder
    """

    def __init__(
        self,
        depth: int = 2,
        edge_weight: float = 1.0,
        self_loop_weight: float = 2.0,
        layer_dims: int = 300,
        bias: bool = False,
        use_weight_layers: bool = True,
        aggr: str = "sum",
        attribute_encoder: HintOrType[TokenizedFrameEncoder] = None,
        attribute_encoder_kwargs: OptionalKwargs = None,
    ):
        if not TORCH_SCATTER:
            logger.error("Could not find torch_scatter and/or torch_sparse package!")
        self.depth = depth
        self.edge_weight = edge_weight
        self.self_loop_weight = self_loop_weight
        self.device = resolve_device()
        self.attribute_encoder = tokenized_frame_encoder_resolver.make(
            attribute_encoder, attribute_encoder_kwargs
        )
        layers: List[BasicMessagePassing]
        if use_weight_layers:
            layers = [
                FrozenGCNConv(
                    in_channels=layer_dims,
                    out_channels=layer_dims,
                    edge_weight=edge_weight,
                    self_loop_weight=self_loop_weight,
                    aggr=aggr,
                )
                for _ in range(self.depth)
            ]
        else:
            layers = [
                BasicMessagePassing(
                    edge_weight=edge_weight,
                    self_loop_weight=self_loop_weight,
                    aggr=aggr,
                )
                for _ in range(self.depth)
            ]
        self.layers = layers

    def _encode_rel(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
    ) -> GeneralVector:
        full_graph = np.concatenate([rel_triples_left, rel_triples_right])
        edge_index = torch.from_numpy(full_graph[:, [0, 2]]).t()
        x = ent_features.vectors
        for layer in self.layers:
            x = layer.forward(x, edge_index)
        return x
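Example: a hedged usage sketch with relation triples. RelationFrameEncoder.encode (documented below) requires left_rel/right_rel frames of (head, relation, tail) triples whose entity ids match the attribute frame index; the import path and exact column names are assumptions.

import pandas as pd

from klinker.encoders import GCNFrameEncoder  # assumed import path

left = pd.DataFrame({"values": ["acme corp berlin", "bob smith"]}, index=["a", "b"])
right = pd.DataFrame({"values": ["ACME Berlin", "Robert Smith"]}, index=["x", "y"])
left_rel = pd.DataFrame([("a", "employs", "b")], columns=["head", "relation", "tail"])
right_rel = pd.DataFrame([("x", "employs", "y")], columns=["head", "relation", "tail"])

encoder = GCNFrameEncoder(depth=2, aggr="mean")
left_vec, right_vec = encoder.encode(
    left, right, left_rel=left_rel, right_rel=right_rel
)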

HybridDeepBlockerFrameEncoder

Bases: CrossTupleTrainingDeepBlockerFrameEncoder

Hybrid DeepBlocker class.

Uses both the Autoencoder and the CrossTupleTraining strategy.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| frame_encoder | HintOrType[TokenizedFrameEncoder] | Base encoder class | None |
| frame_encoder_kwargs | OptionalKwargs | Keyword arguments for initializing frame encoder | None |
| hidden_dimensions | Tuple[int, int] | Hidden dimensions | (2 * 150, 150) |
| num_epochs | int | Number of epochs if training | 50 |
| batch_size | int | Batch size | 256 |
| learning_rate | float | Learning rate | 0.001 |
| synth_tuples_per_tuple | int | Synthetic tuples per tuple | 5 |
| pos_to_neg_ratio | float | Ratio of positive to negative tuples | 1.0 |
| max_perturbation | float | Degree to which tuples should be corrupted | 0.4 |
| random_seed | | Seed to control randomness | None |
| loss_function | Optional[_Loss] | Loss function if training | None |
| optimizer | Optional[HintOrType[Optimizer]] | Optimizer if training | None |
| optimizer_kwargs | OptionalKwargs | Keyword arguments to initialize optimizer | None |
Source code in klinker/encoders/deepblocker.py (lines 354-412):
class HybridDeepBlockerFrameEncoder(CrossTupleTrainingDeepBlockerFrameEncoder):
    """Hybrid DeepBlocker class.

    Uses both Autoencoder and CrossTupleTraining strategy.

    Args:
        frame_encoder: HintOrType[TokenizedFrameEncoder]: Base encoder class
        frame_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing frame encoder
        hidden_dimensions: Tuple[int, int]: Hidden dimensions
        num_epochs: int: Number of epochs if training
        batch_size: int: Batch size
        learning_rate: float: Learning rate
        synth_tuples_per_tuple: int: Synthetic tuples per tuple
        pos_to_neg_ratio: float: Ratio of positive to negative tuples
        max_perturbation: float: Degree to which tuples should be corrupted
        random_seed: Seed to control randomness
        loss_function: Optional[_Loss]: Loss function if training
        optimizer: Optional[HintOrType[Optimizer]]: Optimizer if training
        optimizer_kwargs: OptionalKwargs: Keyword arguments to initialize optimizer
    """

    def __init__(
        self,
        frame_encoder: HintOrType[TokenizedFrameEncoder] = None,
        frame_encoder_kwargs: OptionalKwargs = None,
        hidden_dimensions: Tuple[int, int] = (2 * 150, 150),
        num_epochs: int = 50,
        batch_size: int = 256,
        learning_rate: float = 1e-3,
        synth_tuples_per_tuple: int = 5,
        pos_to_neg_ratio: float = 1.0,
        max_perturbation=0.4,
        random_seed=None,
        loss_function: Optional[_Loss] = None,
        optimizer: Optional[HintOrType[Optimizer]] = None,
        optimizer_kwargs: OptionalKwargs = None,
    ):
        inner_encoder = AutoEncoderDeepBlockerFrameEncoder(
            frame_encoder=frame_encoder,
            frame_encoder_kwargs=frame_encoder_kwargs,
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
        )
        super().__init__(
            frame_encoder=inner_encoder,
            hidden_dimensions=hidden_dimensions,
            num_epochs=num_epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            synth_tuples_per_tuple=synth_tuples_per_tuple,
            pos_to_neg_ratio=pos_to_neg_ratio,
            max_perturbation=max_perturbation,
            random_seed=random_seed,
            loss_function=loss_function,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )

LightEAFrameEncoder

Bases: RelationFrameEncoder

Use the LightEA algorithm to encode frames.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| ent_dim | int | Entity dimensions | 256 |
| depth | int | Number of hops | 2 |
| mini_dim | int | Mini batching size | 16 |
| rel_dim | Optional[int] | Relation embedding dimensions (same as ent_dim if None) | None |
| attribute_encoder | HintOrType[TokenizedFrameEncoder] | Attribute encoder class | None |
| attribute_encoder_kwargs | OptionalKwargs | Keyword arguments for initializing attribute encoder class | None |
Reference

Mao et al., "LightEA: A Scalable, Robust, and Interpretable Entity Alignment Framework via Three-view Label Propagation", EMNLP 2022 https://aclanthology.org/2022.emnlp-main.52.pdf

Source code in klinker/encoders/light_ea.py (lines 56-279):
class LightEAFrameEncoder(RelationFrameEncoder):
    """Use LightEA algorithm to encode frame.

    Args:
        ent_dim: int: Entity dimensions
        depth: int: Number of hops
        mini_dim: int: Mini batching size
        rel_dim: int: Relation embedding dimensions (same as ent_dim if None)
        attribute_encoder: HintOrType[TokenizedFrameEncoder]: Attribute encoder class
        attribute_encoder_kwargs: OptionalKwargs: Keyword arguments for initializing attribute encoder class

    Quote: Reference
        Mao et. al.,"LightEA: A Scalable, Robust, and Interpretable Entity Alignment Framework via Three-view Label Propagation", EMNLP 2022 <https://aclanthology.org/2022.emnlp-main.52.pdf>
    """

    def __init__(
        self,
        ent_dim: int = 256,
        depth: int = 2,
        mini_dim: int = 16,
        rel_dim: Optional[int] = None,
        attribute_encoder: HintOrType[TokenizedFrameEncoder] = None,
        attribute_encoder_kwargs: OptionalKwargs = None,
    ):
        self.ent_dim = ent_dim
        self.depth = depth
        self.device = resolve_device()
        self.mini_dim = mini_dim
        self.rel_dim = ent_dim if rel_dim is None else rel_dim
        self.attribute_encoder = tokenized_frame_encoder_resolver.make(
            attribute_encoder, attribute_encoder_kwargs
        )

    def _encode_rel(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
    ) -> GeneralVector:
        (
            node_size,
            rel_size,
            ent_tuple,
            triples_idx,
            ent_ent,
            ent_ent_val,
            rel_ent,
            ent_rel,
        ) = self._transform_graph(rel_triples_left, rel_triples_right)
        return self._get_features(
            node_size,
            rel_size,
            ent_tuple,
            triples_idx,
            ent_ent,
            ent_ent_val,
            rel_ent,
            ent_rel,
            ent_features.vectors,
        )

    def _transform_graph(
        self, rel_triples_left: np.ndarray, rel_triples_right: np.ndarray
    ):
        triples = []
        rel_size = 0
        for line in rel_triples_left:
            h, r, t = line
            triples.append([h, t, 2 * r])
            triples.append([t, h, 2 * r + 1])
            rel_size = max(rel_size, 2 * r + 1)
        for line in rel_triples_right:
            h, r, t = line
            triples.append([h, t, 2 * r])
            triples.append([t, h, 2 * r + 1])
            rel_size = max(rel_size, 2 * r + 1)
        triples = np.unique(triples, axis=0)
        node_size, rel_size = np.max(triples) + 1, np.max(triples[:, 2]) + 1  # type: ignore
        ent_tuple, triples_idx = [], []
        ent_ent_s, rel_ent_s, ent_rel_s = {}, set(), set()
        last, index = (-1, -1), -1

        for i in range(node_size):
            ent_ent_s[(i, i)] = 0

        for h, t, r in triples:
            ent_ent_s[(h, h)] += 1
            ent_ent_s[(t, t)] += 1

            if (h, t) != last:
                last = (h, t)
                index += 1
                ent_tuple.append([h, t])
                ent_ent_s[(h, t)] = 0

            triples_idx.append([index, r])
            ent_ent_s[(h, t)] += 1
            rel_ent_s.add((r, h))
            ent_rel_s.add((t, r))

        ent_tuple = np.array(ent_tuple)  # type: ignore
        triples_idx = np.unique(np.array(triples_idx), axis=0)  # type: ignore

        ent_ent = np.unique(np.array(list(ent_ent_s.keys())), axis=0)
        ent_ent_val = np.array([ent_ent_s[(x, y)] for x, y in ent_ent]).astype(
            "float32"
        )
        rel_ent = np.unique(np.array(list(rel_ent_s)), axis=0)
        ent_rel = np.unique(np.array(list(ent_rel_s)), axis=0)
        return (
            node_size,
            rel_size,
            ent_tuple,
            triples_idx,
            ent_ent,
            ent_ent_val,
            rel_ent,
            ent_rel,
        )

    @torch.no_grad()
    def _get_features(
        self,
        node_size,
        rel_size,
        ent_tuple,
        triples_idx,
        ent_ent,
        ent_ent_val,
        rel_ent,
        ent_rel,
        ent_feature,
    ):
        ent_feature = ent_feature.to(self.device)
        rel_feature = torch.zeros((rel_size, ent_feature.shape[-1])).to(self.device)

        ent_ent, ent_rel, rel_ent, ent_ent_val, triples_idx, ent_tuple = map(
            torch.tensor,
            [ent_ent, ent_rel, rel_ent, ent_ent_val, triples_idx, ent_tuple],
        )

        ent_ent = ent_ent.t()
        ent_rel = ent_rel.t()
        rel_ent = rel_ent.t()
        triples_idx = triples_idx.t()
        ent_tuple = ent_tuple.t()

        ent_ent_graph = torch.sparse_coo_tensor(
            indices=ent_ent, values=ent_ent_val, size=(node_size, node_size)
        ).to(self.device)
        rel_ent_graph = torch.sparse_coo_tensor(
            indices=rel_ent,
            values=torch.ones(rel_ent.shape[1]),
            size=(rel_size, node_size),
        ).to(self.device)
        ent_rel_graph = torch.sparse_coo_tensor(
            indices=ent_rel,
            values=torch.ones(ent_rel.shape[1]),
            size=(node_size, rel_size),
        ).to(self.device)

        ent_list, rel_list = [ent_feature], [rel_feature]
        for _ in trange(self.depth):
            new_rel_feature = torch.from_numpy(
                _batch_sparse_matmul(rel_ent_graph, ent_feature, self.device)
            ).to(self.device)
            new_rel_feature = _my_norm(new_rel_feature)

            new_ent_feature = torch.from_numpy(
                _batch_sparse_matmul(ent_ent_graph, ent_feature, self.device)
            ).to(self.device)
            new_ent_feature += torch.from_numpy(
                _batch_sparse_matmul(ent_rel_graph, rel_feature, self.device)
            ).to(self.device)
            new_ent_feature = _my_norm(new_ent_feature)

            ent_feature = new_ent_feature
            rel_feature = new_rel_feature
            ent_list.append(ent_feature)
            rel_list.append(rel_feature)

        ent_feature = torch.cat(ent_list, dim=1)
        rel_feature = torch.cat(rel_list, dim=1)

        ent_feature = _my_norm(ent_feature)
        rel_feature = _my_norm(rel_feature)
        rel_feature = _random_projection(rel_feature, self.rel_dim, self.device)
        batch_size = ent_feature.shape[-1] // self.mini_dim
        sparse_graph = torch.sparse_coo_tensor(
            indices=triples_idx,
            values=torch.ones(triples_idx.shape[1]),
            size=(torch.max(triples_idx).item() + 1, rel_size),
        ).to(self.device)
        adj_value = _batch_sparse_matmul(sparse_graph, rel_feature, self.device)
        del rel_feature

        features_list = []

        for batch in trange(self.rel_dim // batch_size + 1):
            temp_list = []
            for head in trange(batch_size):
                if batch * batch_size + head >= self.rel_dim:
                    break
                sparse_graph = torch.sparse_coo_tensor(
                    indices=ent_tuple,
                    values=adj_value[:, batch * batch_size + head],
                    size=(node_size, node_size),
                ).to(self.device)
                feature = _batch_sparse_matmul(
                    sparse_graph,
                    _random_projection(ent_feature, self.mini_dim, self.device).to(
                        self.device
                    ),
                    self.device,
                    batch_size=128,
                    save_mem=True,
                )
                temp_list.append(feature)
            if len(temp_list):
                features_list.append(np.concatenate(temp_list, axis=-1))
        features = np.concatenate(features_list, axis=-1)
        features = normalize(features)
        features = np.concatenate([ent_feature.cpu().numpy(), features], axis=-1)
        return features
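The graph transformation in _transform_graph above turns every triple into a forward and an inverse edge; a tiny standalone illustration of that step (toy integer ids, not the library API):

# every (head, relation, tail) triple yields a forward edge with relation 2*r
# and an inverse edge (tail, head) with relation 2*r + 1
triples = [(0, 0, 1), (1, 1, 2)]
doubled = []
for h, r, t in triples:
    doubled.append([h, t, 2 * r])
    doubled.append([t, h, 2 * r + 1])
# doubled == [[0, 1, 0], [1, 0, 1], [1, 2, 2], [2, 1, 3]]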

RelationFrameEncoder

Bases: FrameEncoder

Base class for encoders that also utilize relational information.

Source code in klinker/encoders/base.py (lines 217-348):
class RelationFrameEncoder(FrameEncoder):
    """Base class for Encoders, that also utilize relational information."""

    attribute_encoder: FrameEncoder

    def validate(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ):
        """Ensure relation info is provided and attribute frames consist of single column.

        Args:
          left: Frame: left attribute information.
          right: Frame: right attribute information.
          left_rel: Optional[Frame]: left relation triples.
          right_rel: Optional[Frame]: right relation triples.

        Raises:
            ValueError: If attribute frames consist of multiple columns or relational frames are missing.
        """
        super().validate(left=left, right=right)
        if left_rel is None or right_rel is None:
            raise ValueError(f"{self.__class__.__name__} needs left_rel and right_rel!")

    def _encode_rel(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
    ) -> GeneralVector:
        raise NotImplementedError

    @overload
    def _encode_rel_as(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
        return_type: Literal["np"],
    ) -> np.ndarray:
        ...

    @overload
    def _encode_rel_as(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
        return_type: Literal["pt"],
    ) -> torch.Tensor:
        ...

    def _encode_rel_as(
        self,
        rel_triples_left: np.ndarray,
        rel_triples_right: np.ndarray,
        ent_features: NamedVector,
        return_type: GeneralVectorLiteral = "pt",
    ) -> GeneralVector:
        enc = self._encode_rel(
            rel_triples_left=rel_triples_left,
            rel_triples_right=rel_triples_right,
            ent_features=ent_features,
        )
        return cast_general_vector(enc, return_type=return_type)

    def encode(
        self,
        left: SeriesType,
        right: SeriesType,
        *,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
        return_type: GeneralVectorLiteral = "pt",
    ) -> Tuple[NamedVector, NamedVector]:
        """Encode dataframes into named vectors.

        Args:
          left: Frame: left attribute information.
          right: Frame: right attribute information.
          *:
          left_rel: Optional[Frame]: left relation triples.
          right_rel: Optional[Frame]: right relation triples.
          return_type: GeneralVectorLiteral:  Either `pt` or `np` to return as pytorch tensor or numpy array.

        Returns:
            Embeddings of given left/right dataset.
        """
        self.validate(left=left, right=right, left_rel=left_rel, right_rel=right_rel)
        left, right = self.prepare(left, right)

        start = time.time()
        # encode attributes
        left_attr_enc, right_attr_enc = self.attribute_encoder.encode(
            left, right, return_type=return_type
        )
        all_attr_enc = left_attr_enc.concat(right_attr_enc)

        # map string based triples to int
        entity_mapping = all_attr_enc.entity_id_mapping
        rel_triples_left, entity_mapping, rel_mapping = id_map_rel_triples(
            left_rel, entity_mapping=entity_mapping
        )
        rel_triples_right, entity_mapping, rel_mapping = id_map_rel_triples(
            right_rel,
            entity_mapping=entity_mapping,
            rel_mapping=rel_mapping,
        )

        # initialize entity features randomly and replace with
        # attribute features where known
        ent_features = initialize_and_fill(known=all_attr_enc, all_names=entity_mapping)
        left_ids = list(_get_ids(left, left_rel))
        right_ids = list(_get_ids(right, right_rel))

        # encode relations
        features = self._encode_rel_as(
            rel_triples_left=rel_triples_left,
            rel_triples_right=rel_triples_right,
            ent_features=ent_features,
            return_type=return_type,
        )
        named_features = NamedVector(names=entity_mapping, vectors=features)  # type: ignore

        end = time.time()
        self._encoding_time = end - start
        return named_features.subset(list(left_ids)), named_features.subset(
            list(right_ids)
        )

encode(left, right, *, left_rel=None, right_rel=None, return_type='pt')

Encode dataframes into named vectors.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | SeriesType | Left attribute information. | required |
| right | SeriesType | Right attribute information. | required |
| left_rel | Optional[Frame] | Left relation triples. | None |
| right_rel | Optional[Frame] | Right relation triples. | None |
| return_type | GeneralVectorLiteral | Either `pt` or `np` to return as pytorch tensor or numpy array. | 'pt' |

Returns:

| Type | Description |
| --- | --- |
| Tuple[NamedVector, NamedVector] | Embeddings of given left/right dataset. |

Source code in klinker/encoders/base.py (lines 286-348):
def encode(
    self,
    left: SeriesType,
    right: SeriesType,
    *,
    left_rel: Optional[Frame] = None,
    right_rel: Optional[Frame] = None,
    return_type: GeneralVectorLiteral = "pt",
) -> Tuple[NamedVector, NamedVector]:
    """Encode dataframes into named vectors.

    Args:
      left: Frame: left attribute information.
      right: Frame: right attribute information.
      *:
      left_rel: Optional[Frame]: left relation triples.
      right_rel: Optional[Frame]: right relation triples.
      return_type: GeneralVectorLiteral:  Either `pt` or `np` to return as pytorch tensor or numpy array.

    Returns:
        Embeddings of given left/right dataset.
    """
    self.validate(left=left, right=right, left_rel=left_rel, right_rel=right_rel)
    left, right = self.prepare(left, right)

    start = time.time()
    # encode attributes
    left_attr_enc, right_attr_enc = self.attribute_encoder.encode(
        left, right, return_type=return_type
    )
    all_attr_enc = left_attr_enc.concat(right_attr_enc)

    # map string based triples to int
    entity_mapping = all_attr_enc.entity_id_mapping
    rel_triples_left, entity_mapping, rel_mapping = id_map_rel_triples(
        left_rel, entity_mapping=entity_mapping
    )
    rel_triples_right, entity_mapping, rel_mapping = id_map_rel_triples(
        right_rel,
        entity_mapping=entity_mapping,
        rel_mapping=rel_mapping,
    )

    # initialize entity features randomly and replace with
    # attribute features where known
    ent_features = initialize_and_fill(known=all_attr_enc, all_names=entity_mapping)
    left_ids = list(_get_ids(left, left_rel))
    right_ids = list(_get_ids(right, right_rel))

    # encode relations
    features = self._encode_rel_as(
        rel_triples_left=rel_triples_left,
        rel_triples_right=rel_triples_right,
        ent_features=ent_features,
        return_type=return_type,
    )
    named_features = NamedVector(names=entity_mapping, vectors=features)  # type: ignore

    end = time.time()
    self._encoding_time = end - start
    return named_features.subset(list(left_ids)), named_features.subset(
        list(right_ids)
    )

validate(left, right, left_rel=None, right_rel=None)

Ensure relation info is provided and attribute frames consist of single column.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| left | Frame | Left attribute information. | required |
| right | Frame | Right attribute information. | required |
| left_rel | Optional[Frame] | Left relation triples. | None |
| right_rel | Optional[Frame] | Right relation triples. | None |

Raises:

| Type | Description |
| --- | --- |
| ValueError | If attribute frames consist of multiple columns or relational frames are missing. |

Source code in klinker/encoders/base.py (lines 222-242):
def validate(
    self,
    left: Frame,
    right: Frame,
    left_rel: Optional[Frame] = None,
    right_rel: Optional[Frame] = None,
):
    """Ensure relation info is provided and attribute frames consist of single column.

    Args:
      left: Frame: left attribute information.
      right: Frame: right attribute information.
      left_rel: Optional[Frame]: left relation triples.
      right_rel: Optional[Frame]: right relation triples.

    Raises:
        ValueError: If attribute frames consist of multiple columns or relational frames are missing.
    """
    super().validate(left=left, right=right)
    if left_rel is None or right_rel is None:
        raise ValueError(f"{self.__class__.__name__} needs left_rel and right_rel!")
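Subclasses only need to set attribute_encoder and implement _encode_rel; encode() above handles id mapping, feature initialization and splitting the result back into the two datasets. A hypothetical sketch that simply adds each entity's neighbor features to its own vector (the aggregation rule and import paths are illustrative, not part of the library):

import numpy as np

from klinker.encoders import AverageEmbeddingTokenizedFrameEncoder  # assumed import path
from klinker.encoders.base import RelationFrameEncoder  # assumed import path

class NeighborSumFrameEncoder(RelationFrameEncoder):
    """Hypothetical example: add neighbor features once along all edges."""

    def __init__(self, attribute_encoder=None):
        # any TokenizedFrameEncoder works here, e.g. the averaging encoder above
        self.attribute_encoder = attribute_encoder or AverageEmbeddingTokenizedFrameEncoder()

    def _encode_rel(self, rel_triples_left, rel_triples_right, ent_features):
        feats = ent_features.vectors
        x = feats + 0  # copy; works for numpy arrays and torch tensors alike
        for h, _, t in np.concatenate([rel_triples_left, rel_triples_right]):
            x[h] = x[h] + feats[t]
            x[t] = x[t] + feats[h]
        return x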

SIFEmbeddingTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Use Smooth Inverse Frequency weighting scheme to aggregate token embeddings.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| sif_weighting_param | float | Weighting parameter | 0.001 |
| remove_pc | bool | Remove first principal component | True |
| min_freq | int | Minimum frequency of occurrence | 0 |
| tokenized_word_embedder | HintOrType[TokenizedWordEmbedder] | Word embedding class | None |
| tokenized_word_embedder_kwargs | OptionalKwargs | Keyword arguments for initializing word embedder | None |

Reference

Arora et al., "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", ICLR 2017 https://openreview.net/pdf?id=SyK00v5xx
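The weighting itself follows the SIF formula from the reference: with weighting parameter a, a token that makes up fraction p of all tokens gets weight a / (a + p), while tokens below min_freq keep weight 1.0. A small sketch mirroring the sif_weighting helper in prepare() below:

a = 1e-3           # sif_weighting_param
min_freq = 0

def sif_weight(count, total_tokens):
    if count >= min_freq:
        return a / (a + count / total_tokens)
    return 1.0

sif_weight(5_000, 100_000)  # frequent token -> ~0.0196
sif_weight(3, 100_000)      # rare token     -> ~0.9709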

Source code in klinker/encoders/pretrained.py (lines 363-493):
class SIFEmbeddingTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Use Smooth Inverse Frequency weighting scheme to aggregate token embeddings.

    Args:

        sif_weighting_param: float: weighting parameter
        remove_pc: bool: remove first principal component
        min_freq: int: minimum frequency of occurrence
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder]: Word Embedding class
        tokenized_word_embedder_kwargs: OptionalKwargs: Keyword arguments for initializing word embedder

    Quote: Reference
        Arora et al., "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", ICLR 2017 <https://openreview.net/pdf?id=SyK00v5xx>
    """

    def __init__(
        self,
        sif_weighting_param: float = 1e-3,
        remove_pc: bool = True,
        min_freq: int = 0,
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder] = None,
        tokenized_word_embedder_kwargs: OptionalKwargs = None,
    ):
        self.tokenized_word_embedder = tokenized_word_embedder_resolver.make(
            tokenized_word_embedder, tokenized_word_embedder_kwargs
        )

        self.sif_weighting_param = sif_weighting_param
        self.remove_pc = remove_pc
        self.min_freq = min_freq
        self.token_weight_dict: Optional[Dict[str, float]] = None

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        """ """
        return self.tokenized_word_embedder.tokenizer_fn

    def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
        """Prepare value counts.

        Args:
          left: Frame: left attribute frame.
          right: Frame: right attribute frame.

        Returns:
            left, right
        """
        left, right = super().prepare(left, right)
        merged_col = "merged"
        left.columns = [merged_col]
        right.columns = [merged_col]
        all_values = concat_frames([left, right])

        value_counts = (
            all_values[merged_col]
            .apply(self.tokenized_word_embedder.tokenizer_fn)
            .explode()
            .value_counts()
        )

        def sif_weighting(x, a: float, min_freq: int, total_tokens: int):
            if x >= min_freq:
                return a / (a + x / total_tokens)
            else:
                return 1.0

        total_tokens = value_counts.sum()
        if isinstance(left, KlinkerDaskFrame):
            total_tokens = total_tokens.compute()

        token_weight_dict = value_counts.apply(
            sif_weighting,
            a=self.sif_weighting_param,
            min_freq=self.min_freq,
            total_tokens=total_tokens,
        )

        if isinstance(left, KlinkerDaskFrame):
            token_weight_dict = token_weight_dict.compute()

        self.token_weight_dict = token_weight_dict.to_dict()
        return left, right

    def _postprocess(self, embeddings) -> GeneralVector:
        # From the code of the SIF paper at
        # https://github.com/PrincetonML/SIF/blob/master/src/SIF_embedding.py
        if self.remove_pc:
            svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
            svd.fit(embeddings)
            pc = svd.components_

            sif_embeddings = embeddings - embeddings.dot(pc.transpose()) * pc
        else:
            sif_embeddings = embeddings
        return sif_embeddings

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        if self.token_weight_dict is None:
            self.prepare(left, right)
        if isinstance(left, KlinkerDaskFrame):
            left_enc = left.map_partitions(
                encode_frame,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            ).compute()
            right_enc = right.map_partitions(
                encode_frame,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            ).compute()
        else:
            left_enc = encode_frame(
                left,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            )
            right_enc = encode_frame(
                right,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            )
        if self.remove_pc:
            left_enc = self._postprocess(left_enc)
            right_enc = self._postprocess(right_enc)
        return left_enc, right_enc
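
Unlike the other encoders in this module, no usage example is shown above; the following sketch mirrors their doctest style. It assumes SIFEmbeddingTokenizedFrameEncoder is exported from klinker.encoders like the other pretrained encoders and that the default word-embedding backend is installed.

>>> # doctest: +SKIP
>>> import pandas as pd

>>> from klinker.data import KlinkerPandasFrame
>>> from klinker.encoders import SIFEmbeddingTokenizedFrameEncoder

>>> left = KlinkerPandasFrame.from_df(
         pd.DataFrame([("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]),
         table_name="A",
         id_col="id",
    ).set_index("id")
>>> right = KlinkerPandasFrame.from_df(
        pd.DataFrame([("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]),
        table_name="B",
        id_col="id",
    ).set_index("id")
>>> sif = SIFEmbeddingTokenizedFrameEncoder(sif_weighting_param=1e-3, remove_pc=True)
>>> left_enc, right_enc = sif.encode(left=left, right=right)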

tokenizer_fn: Callable[[str], List[str]] property

prepare(left, right)

Prepare value counts.

Parameters:

Name Type Description Default
left Frame

Frame: left attribute frame.

required
right Frame

Frame: right attribute frame.

required

Returns:

Type Description
Tuple[Frame, Frame]

left, right

Source code in klinker/encoders/pretrained.py
lines 400-444
def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
    """Prepare value counts.

    Args:
      left: Frame: left attribute frame.
      right: Frame: right attribute frame.

    Returns:
        left, right
    """
    left, right = super().prepare(left, right)
    merged_col = "merged"
    left.columns = [merged_col]
    right.columns = [merged_col]
    all_values = concat_frames([left, right])

    value_counts = (
        all_values[merged_col]
        .apply(self.tokenized_word_embedder.tokenizer_fn)
        .explode()
        .value_counts()
    )

    def sif_weighting(x, a: float, min_freq: int, total_tokens: int):
        if x >= min_freq:
            return a / (a + x / total_tokens)
        else:
            return 1.0

    total_tokens = value_counts.sum()
    if isinstance(left, KlinkerDaskFrame):
        total_tokens = total_tokens.compute()

    token_weight_dict = value_counts.apply(
        sif_weighting,
        a=self.sif_weighting_param,
        min_freq=self.min_freq,
        total_tokens=total_tokens,
    )

    if isinstance(left, KlinkerDaskFrame):
        token_weight_dict = token_weight_dict.compute()

    self.token_weight_dict = token_weight_dict.to_dict()
    return left, right

SentenceTransformerTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Uses the sentence-transformers library to encode frames.

See https://www.sbert.net/docs/pretrained_models.html for a list of models.

Parameters:

Name Type Description Default
model_name str

str: pretrained model name

'all-MiniLM-L6-v2'
max_length int

int: max number of tokens per row

128
batch_size int

int: size of batch for encoding

512

Examples:

>>> # doctest: +SKIP
>>> import pandas as pd

>>> from klinker.data import KlinkerPandasFrame
>>> from klinker.encoders import SentenceTransformerTokenizedFrameEncoder

>>> left = KlinkerPandasFrame.from_df(
         pd.DataFrame(
             [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
         ),
         table_name="A",
         id_col="id",
    ).set_index("id")
>>> right = KlinkerPandasFrame.from_df(
        pd.DataFrame(
            [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
        ),
        table_name="B",
        id_col="id",
    ).set_index("id")
>>> ttfe = SentenceTransformerTokenizedFrameEncoder(
        model_name="st5",
        max_length=10,
        batch_size=2
    )
>>> left_enc, right_enc = ttfe.encode(left=left, right=right)
Source code in klinker/encoders/pretrained.py
lines 135-205
class SentenceTransformerTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Uses sentencetransformer library to encode frames.

    See <https://www.sbert.net/docs/pretrained_models.html> for a list of models.

    Args:
        model_name: str: pretrained model name
        max_length: int: max number of tokens per row
        batch_size: int: size of batch for encoding

    Examples:

        >>> # doctest: +SKIP
        >>> import pandas as pd

        >>> from klinker.data import KlinkerPandasFrame
        >>> from klinker.encoders import SentenceTransformerTokenizedFrameEncoder

        >>> left = KlinkerPandasFrame.from_df(
                 pd.DataFrame(
                     [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
                 ),
                 table_name="A",
                 id_col="id",
            ).set_index("id")
        >>> right = KlinkerPandasFrame.from_df(
                pd.DataFrame(
                    [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
                ),
                table_name="B",
                id_col="id",
            ).set_index("id")
        >>> ttfe = SentenceTransformerTokenizedFrameEncoder(
                model_name="st5",
                max_length=10,
                batch_size=2
            )
        >>> left_enc, right_enc = ttfe.encode(left=left, right=right)

    """

    def __init__(
        self,
        model_name: str = "all-MiniLM-L6-v2",
        max_length: int = 128,
        batch_size: int = 512,
    ):
        if SentenceTransformer is None:
            raise ImportError("Please install the sentence-transformers library!")
        self.model = SentenceTransformer(model_name)
        self.model.max_seq_length = max_length
        self.batch_size = batch_size

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.model.tokenizer.tokenize

    @torch.no_grad()
    def _encode_side(self, df: Frame) -> GeneralVector:
        return self.model.encode(
            list(df[df.columns[0]].values), batch_size=self.batch_size
        )

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        return self._encode_side(left), self._encode_side(right)

TokenizedFrameEncoder

Bases: FrameEncoder

FrameEncoder that uses tokenization of attribute values.

Source code in klinker/encoders/base.py
153
154
155
156
157
158
159
class TokenizedFrameEncoder(FrameEncoder):
    """FrameEncoder that uses tokenization of attribute values."""

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        """ """
        raise NotImplementedError

tokenizer_fn: Callable[[str], List[str]] property
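
Because tokenizer_fn is the only member this base class declares, a concrete subclass mainly has to supply a tokenizer; the actual encoding logic comes from the rest of the FrameEncoder interface. A minimal, hypothetical sketch follows; the class name and the whitespace tokenizer are illustrative assumptions.

from typing import Callable, List

class WhitespaceTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Hypothetical subclass that tokenizes attribute values on whitespace."""

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        # str.split splits on runs of whitespace
        return str.split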

TransformerTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Encode frames using pre-trained transformer.

See https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModel.from_pretrained for more information on pretrained models.

Parameters:

Name Type Description Default
pretrained_model_name_or_path str

str: Transformer name or path

'bert-base-cased'
max_length int

int: max number of tokens per row

128
batch_size int

int: size of batch for encoding

512

Examples:

>>> # doctest: +SKIP
>>> import pandas as pd

>>> from klinker.data import KlinkerPandasFrame
>>> from klinker.encoders import TransformerTokenizedFrameEncoder

>>> left = KlinkerPandasFrame.from_df(
         pd.DataFrame(
             [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
         ),
         table_name="A",
         id_col="id",
    ).set_index("id")
>>> right = KlinkerPandasFrame.from_df(
        pd.DataFrame(
            [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
        ),
        table_name="B",
        id_col="id",
    ).set_index("id")
>>> ttfe = TransformerTokenizedFrameEncoder(
        pretrained_model_name_or_path="bert-base-cased",
        max_length=10,
        batch_size=2
    )
>>> left_enc, right_enc = ttfe.encode(left=left, right=right)
Source code in klinker/encoders/pretrained.py
lines 53-132
class TransformerTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Encode frames using pre-trained transformer.

    See <https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModel.from_pretrained> for more information on pretrained models.

    Args:
        pretrained_model_name_or_path: str: Transformer name or path
        max_length: int: max number of tokens per row
        batch_size: int: size of batch for encoding

    Examples:

        >>> # doctest: +SKIP
        >>> import pandas as pd

        >>> from klinker.data import KlinkerPandasFrame
        >>> from klinker.encoders import TransformerTokenizedFrameEncoder

        >>> left = KlinkerPandasFrame.from_df(
                 pd.DataFrame(
                     [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
                 ),
                 table_name="A",
                 id_col="id",
            ).set_index("id")
        >>> right = KlinkerPandasFrame.from_df(
                pd.DataFrame(
                    [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
                ),
                table_name="B",
                id_col="id",
            ).set_index("id")
        >>> ttfe = TransformerTokenizedFrameEncoder(
                pretrained_model_name_or_path="bert-base-cased",
                max_length=10,
                batch_size=2
            )
        >>> left_enc, right_enc = ttfe.encode(left=left, right=right)

    """

    def __init__(
        self,
        pretrained_model_name_or_path: str = "bert-base-cased",
        max_length: int = 128,
        batch_size: int = 512,
    ):
        if AutoModel is None:
            raise ImportError("Please install the transformers library!")
        self.model = AutoModel.from_pretrained(pretrained_model_name_or_path)
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.max_length = max_length
        self.batch_size = batch_size

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.tokenizer.tokenize

    @torch.no_grad()
    def _encode_side(self, df: Frame) -> GeneralVector:
        encoded = []
        for batch in _batch_generator(df, self.batch_size):
            tok = self.tokenizer(
                list(batch),
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=self.max_length,
            )
            encoded.append(self.model(**tok).pooler_output.detach())
        return torch.vstack(encoded)

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        return self._encode_side(left), self._encode_side(right)