pretrained

AverageEmbeddingTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Averages embeddings of tokenized entity attribute values.

Parameters:

    tokenized_word_embedder (HintOrType[TokenizedWordEmbedder]): Word embedding class. Default: None
    tokenized_word_embedder_kwargs (OptionalKwargs): Keyword arguments for initializing the word embedder. Default: None
Source code in klinker/encoders/pretrained.py
class AverageEmbeddingTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Averages embeddings of tokenized entity attribute values.

    Args:
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder]: Word Embedding class
        tokenized_word_embedder_kwargs: OptionalKwargs: Keyword arguments for initializing word embedder
    """

    def __init__(
        self,
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder] = None,
        tokenized_word_embedder_kwargs: OptionalKwargs = None,
    ):
        self.tokenized_word_embedder = tokenized_word_embedder_resolver.make(
            tokenized_word_embedder, tokenized_word_embedder_kwargs
        )

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.tokenized_word_embedder.tokenizer_fn

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        if isinstance(left, dd.DataFrame):
            left = left.compute()
            right = right.compute()
        return (
            encode_frame(left, twe=self.tokenized_word_embedder),
            encode_frame(right, twe=self.tokenized_word_embedder),
        )
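
A minimal usage sketch (not part of the original docs): it assumes the class is exported from klinker.encoders like the other encoders on this page, and that embedding_fn can be passed through to TokenizedWordEmbedder via tokenized_word_embedder_kwargs.

import pandas as pd

from klinker.data import KlinkerPandasFrame
from klinker.encoders import AverageEmbeddingTokenizedFrameEncoder

left = KlinkerPandasFrame.from_df(
    pd.DataFrame([("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]),
    table_name="A",
    id_col="id",
).set_index("id")
right = KlinkerPandasFrame.from_df(
    pd.DataFrame([("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]),
    table_name="B",
    id_col="id",
).set_index("id")

# Assumption: "glove" is resolved by TokenizedWordEmbedder through gensim's
# downloader (see TokenizedWordEmbedder below); the first call downloads the vectors.
encoder = AverageEmbeddingTokenizedFrameEncoder(
    tokenized_word_embedder_kwargs=dict(embedding_fn="glove"),
)
left_enc, right_enc = encoder.encode(left=left, right=right)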

SIFEmbeddingTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Uses the Smooth Inverse Frequency (SIF) weighting scheme to aggregate token embeddings.

Parameters:

    sif_weighting_param (float): SIF weighting parameter. Default: 1e-3
    remove_pc (bool): Whether to remove the first principal component. Default: True
    min_freq (int): Minimum frequency of occurrence. Default: 0
    tokenized_word_embedder (HintOrType[TokenizedWordEmbedder]): Word embedding class. Default: None
    tokenized_word_embedder_kwargs (OptionalKwargs): Keyword arguments for initializing the word embedder. Default: None

Reference

    Arora et al., "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", ICLR 2017, https://openreview.net/pdf?id=SyK00v5xx

Source code in klinker/encoders/pretrained.py
class SIFEmbeddingTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Use Smooth Inverse Frequency weighting scheme to aggregate token embeddings.

    Args:
        sif_weighting_param: float: weighting parameter
        remove_pc: bool: remove first principal component
        min_freq: int: minimum frequency of occurrence
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder]: Word Embedding class
        tokenized_word_embedder_kwargs: OptionalKwargs: Keyword arguments for initializing word embedder

    Quote: Reference
        Arora et al., "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", ICLR 2017 <https://openreview.net/pdf?id=SyK00v5xx>
    """

    def __init__(
        self,
        sif_weighting_param: float = 1e-3,
        remove_pc: bool = True,
        min_freq: int = 0,
        tokenized_word_embedder: HintOrType[TokenizedWordEmbedder] = None,
        tokenized_word_embedder_kwargs: OptionalKwargs = None,
    ):
        self.tokenized_word_embedder = tokenized_word_embedder_resolver.make(
            tokenized_word_embedder, tokenized_word_embedder_kwargs
        )

        self.sif_weighting_param = sif_weighting_param
        self.remove_pc = remove_pc
        self.min_freq = min_freq
        self.token_weight_dict: Optional[Dict[str, float]] = None

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        """ """
        return self.tokenized_word_embedder.tokenizer_fn

    def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
        """Prepare value counts.

        Args:
          left: Frame: left attribute frame.
          right: Frame: right attribute frame.

        Returns:
            left, right
        """
        left, right = super().prepare(left, right)
        merged_col = "merged"
        left.columns = [merged_col]
        right.columns = [merged_col]
        all_values = concat_frames([left, right])

        value_counts = (
            all_values[merged_col]
            .apply(self.tokenized_word_embedder.tokenizer_fn)
            .explode()
            .value_counts()
        )

        def sif_weighting(x, a: float, min_freq: int, total_tokens: int):
            if x >= min_freq:
                return a / (a + x / total_tokens)
            else:
                return 1.0

        total_tokens = value_counts.sum()
        if isinstance(left, KlinkerDaskFrame):
            total_tokens = total_tokens.compute()

        token_weight_dict = value_counts.apply(
            sif_weighting,
            a=self.sif_weighting_param,
            min_freq=self.min_freq,
            total_tokens=total_tokens,
        )

        if isinstance(left, KlinkerDaskFrame):
            token_weight_dict = token_weight_dict.compute()

        self.token_weight_dict = token_weight_dict.to_dict()
        return left, right

    def _postprocess(self, embeddings) -> GeneralVector:
        # From the code of the SIF paper at
        # https://github.com/PrincetonML/SIF/blob/master/src/SIF_embedding.py
        if self.remove_pc:
            svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
            svd.fit(embeddings)
            pc = svd.components_

            sif_embeddings = embeddings - embeddings.dot(pc.transpose()) * pc
        else:
            sif_embeddings = embeddings
        return sif_embeddings

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        if self.token_weight_dict is None:
            self.prepare(left, right)
        if isinstance(left, KlinkerDaskFrame):
            left_enc = left.map_partitions(
                encode_frame,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            ).compute()
            right_enc = right.map_partitions(
                encode_frame,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            ).compute()
        else:
            left_enc = encode_frame(
                left,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            )
            right_enc = encode_frame(
                right,
                twe=self.tokenized_word_embedder,
                weight_dict=self.token_weight_dict,
            )
        if self.remove_pc:
            left_enc = self._postprocess(left_enc)
            right_enc = self._postprocess(right_enc)
        return left_enc, right_enc
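
A hedged usage sketch, reusing left/right frames built as in the examples on this page; passing embedding_fn through tokenized_word_embedder_kwargs is the same assumption as in the sketch above.

from klinker.encoders import SIFEmbeddingTokenizedFrameEncoder

encoder = SIFEmbeddingTokenizedFrameEncoder(
    sif_weighting_param=1e-3,   # defaults shown explicitly
    remove_pc=True,
    min_freq=0,
    tokenized_word_embedder_kwargs=dict(embedding_fn="glove"),
)
# prepare() is invoked from _encode when no token weights have been
# computed yet, so a plain encode() call is enough.
left_enc, right_enc = encoder.encode(left=left, right=right)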

tokenizer_fn: Callable[[str], List[str]] property

prepare(left, right)

Prepare value counts.

Parameters:

    left (Frame): left attribute frame. Required.
    right (Frame): right attribute frame. Required.

Returns:

    Tuple[Frame, Frame]: left, right

Source code in klinker/encoders/pretrained.py
def prepare(self, left: Frame, right: Frame) -> Tuple[Frame, Frame]:
    """Prepare value counts.

    Args:
      left: Frame: left attribute frame.
      right: Frame: right attribute frame.

    Returns:
        left, right
    """
    left, right = super().prepare(left, right)
    merged_col = "merged"
    left.columns = [merged_col]
    right.columns = [merged_col]
    all_values = concat_frames([left, right])

    value_counts = (
        all_values[merged_col]
        .apply(self.tokenized_word_embedder.tokenizer_fn)
        .explode()
        .value_counts()
    )

    def sif_weighting(x, a: float, min_freq: int, total_tokens: int):
        if x >= min_freq:
            return a / (a + x / total_tokens)
        else:
            return 1.0

    total_tokens = value_counts.sum()
    if isinstance(left, KlinkerDaskFrame):
        total_tokens = total_tokens.compute()

    token_weight_dict = value_counts.apply(
        sif_weighting,
        a=self.sif_weighting_param,
        min_freq=self.min_freq,
        total_tokens=total_tokens,
    )

    if isinstance(left, KlinkerDaskFrame):
        token_weight_dict = token_weight_dict.compute()

    self.token_weight_dict = token_weight_dict.to_dict()
    return left, right
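
To make the weighting concrete, a small numeric illustration of the sif_weighting function above (hypothetical token counts, not produced by klinker):

a = 1e-3             # sif_weighting_param
total_tokens = 1000
counts = {"doe": 200, "john": 50, "rare": 1}

# With min_freq = 0 every count takes the a / (a + count / total_tokens) branch;
# counts below min_freq would instead keep a weight of 1.0.
weights = {tok: a / (a + cnt / total_tokens) for tok, cnt in counts.items()}
# Frequent tokens are strongly down-weighted, rare ones far less so:
# {'doe': ~0.0050, 'john': ~0.0196, 'rare': 0.5}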

SentenceTransformerTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Uses the sentence-transformers library to encode frames.

See https://www.sbert.net/docs/pretrained_models.html for a list of models.

Parameters:

    model_name (str): Pretrained model name. Default: 'all-MiniLM-L6-v2'
    max_length (int): Maximum number of tokens per row. Default: 128
    batch_size (int): Batch size for encoding. Default: 512

Examples:

>>> # doctest: +SKIP
>>> import pandas as pd

>>> from klinker.data import KlinkerPandasFrame
>>> from klinker.encoders import SentenceTransformerTokenizedFrameEncoder

>>> left = KlinkerPandasFrame.from_df(
         pd.DataFrame(
             [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
         ),
         table_name="A",
         id_col="id",
    ).set_index("id")
>>> right = KlinkerPandasFrame.from_df(
        pd.DataFrame(
            [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
        ),
        table_name="B",
        id_col="id",
    ).set_index("id")
>>> ttfe = SentenceTransformerTokenizedFrameEncoder(
        model_name="st5",
        max_length=10,
        batch_size=2
    )
>>> left_enc, right_enc = ttfe.encode(left=left, right=right)
Source code in klinker/encoders/pretrained.py
class SentenceTransformerTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Uses sentencetransformer library to encode frames.

    See <https://www.sbert.net/docs/pretrained_models.html> for a list of models.

    Args:
        model_name: str: pretrained model name
        max_length: int: max number of tokens per row
        batch_size: int: size of batch for encoding

    Examples:

        >>> # doctest: +SKIP
        >>> import pandas as pd

        >>> from klinker.data import KlinkerPandasFrame
        >>> from klinker.encoders import SentenceTransformerTokenizedFrameEncoder

        >>> left = KlinkerPandasFrame.from_df(
                 pd.DataFrame(
                     [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
                 ),
                 table_name="A",
                 id_col="id",
            ).set_index("id")
        >>> right = KlinkerPandasFrame.from_df(
                pd.DataFrame(
                    [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
                ),
                table_name="B",
                id_col="id",
            ).set_index("id")
        >>> ttfe = SentenceTransformerTokenizedFrameEncoder(
                model_name="st5",
                max_length=10,
                batch_size=2
            )
        >>> left_enc, right_enc = ttfe.encode(left=left, right=right)

    """

    def __init__(
        self,
        model_name: str = "all-MiniLM-L6-v2",
        max_length: int = 128,
        batch_size: int = 512,
    ):
        if SentenceTransformer is None:
            raise ImportError("Please install the sentence-transformers library!")
        self.model = SentenceTransformer(model_name)
        self.model.max_seq_length = max_length
        self.batch_size = batch_size

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.model.tokenizer.tokenize

    @torch.no_grad()
    def _encode_side(self, df: Frame) -> GeneralVector:
        return self.model.encode(
            list(df[df.columns[0]].values), batch_size=self.batch_size
        )

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        return self._encode_side(left), self._encode_side(right)

TokenizedWordEmbedder

Encode using pre-trained word embeddings.

Parameters:

    embedding_fn (Union[str, Callable[[str], GeneralVector]]): Either one of "fasttext", "glove", "word2vec" or an embedding function. Default: 'fasttext'
    tokenizer_fn (Callable[[str], List[str]]): Tokenizer function. Default: word_tokenize
Source code in klinker/encoders/pretrained.py
class TokenizedWordEmbedder:
    """Encode using pre-trained word embeddings.

    Args:
      embedding_fn: Union[str, Callable[[str], GeneralVector]]: Either one of "fasttext","glove","word2vec" or embedding function
      tokenizer_fn: Callable[[str], List[str]]: Tokenizer function.
    """

    _gensim_mapping_download = {
        "fasttext": "fasttext-wiki-news-subwords-300",
        "glove": "glove-wiki-gigaword-300",
        "word2vec": "word2vec-google-news-300",
    }

    def __init__(
        self,
        embedding_fn: Union[str, Callable[[str], GeneralVector]] = "fasttext",
        tokenizer_fn: Callable[[str], List[str]] = word_tokenize,
    ):
        if isinstance(embedding_fn, str):
            if embedding_fn in TokenizedWordEmbedder._gensim_mapping_download:
                actual_name = TokenizedWordEmbedder._gensim_mapping_download[
                    embedding_fn
                ]
                memmap_path = str(word_embedding_dir.joinpath(f"{actual_name}.kv"))
                if not os.path.exists(memmap_path):
                    kv = gensim_downloader.load(actual_name)
                    kv.save(memmap_path)
                else:
                    kv = KeyedVectors.load(memmap_path, mmap="r")
            else:
                kv = gensim_downloader.load(embedding_fn)
            self.embedding_fn = kv.__getitem__
        else:
            self.embedding_fn = embedding_fn
        self.tokenizer_fn = tokenizer_fn
        self._embedding_dim = -1
        self._unknown_token_counter = 0

    @property
    def embedding_dim(self) -> int:
        """Embedding dimension of pretrained word embeddings."""
        if self._embedding_dim == -1:
            self._embedding_dim = self.embedding_fn("hello").shape[0]
        return self._embedding_dim

    def embed(self, values: str) -> np.ndarray:
        """Tokenizes string and returns average of token embeddings.

        Args:
          values: str: string value to embed.

        Returns:
            embedding
        """
        return self.weighted_embed(values, {})

    def weighted_embed(
        self, values: str, weight_mapping: Dict[str, float]
    ) -> np.ndarray:
        """Tokenizes string and returns weighted average of token embeddings.

        Args:
          values: str: string value to embed.
          weight_mapping: Dict[str, float]: weights for tokens.

        Returns:
            embedding
        """
        # TODO fix code duplication across embed methods can be solved better
        embedded: List[GeneralVector] = []
        for tok in self.tokenizer_fn(values):
            try:
                tok_emb = self.embedding_fn(tok) * weight_mapping.get(tok, 1.0)
                embedded.append(tok_emb)
            except KeyError:
                self._unknown_token_counter += 1
                continue
        if len(embedded) == 0:
            return np.array([np.nan] * self.embedding_dim)
        emb: np.ndarray = np.mean(np.vstack(embedded), axis=0)
        return emb
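
A hedged usage sketch (assuming the class is importable from klinker.encoders.pretrained; the first call downloads the gensim vectors, and tokens missing from the embedding vocabulary are skipped):

from klinker.encoders.pretrained import TokenizedWordEmbedder

twe = TokenizedWordEmbedder(embedding_fn="fasttext")
vec = twe.embed("John Doe")                  # unweighted mean of token embeddings
weighted = twe.weighted_embed("John Doe", {"John": 0.1, "Doe": 0.9})
print(vec.shape, twe.embedding_dim)          # e.g. (300,) 300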

embedding_dim: int property

Embedding dimension of pretrained word embeddings.

embed(values)

Tokenizes string and returns average of token embeddings.

Parameters:

    values (str): String value to embed. Required.

Returns:

    ndarray: embedding

Source code in klinker/encoders/pretrained.py
def embed(self, values: str) -> np.ndarray:
    """Tokenizes string and returns average of token embeddings.

    Args:
      values: str: string value to embed.

    Returns:
        embedding
    """
    return self.weighted_embed(values, {})

weighted_embed(values, weight_mapping)

Tokenizes string and returns weighted average of token embeddings.

Parameters:

    values (str): String value to embed. Required.
    weight_mapping (Dict[str, float]): Weights for tokens. Required.

Returns:

    ndarray: embedding

Source code in klinker/encoders/pretrained.py
def weighted_embed(
    self, values: str, weight_mapping: Dict[str, float]
) -> np.ndarray:
    """Tokenizes string and returns weighted average of token embeddings.

    Args:
      values: str: string value to embed.
      weight_mapping: Dict[str, float]: weights for tokens.

    Returns:
        embedding
    """
    # TODO fix code duplication across embed methods can be solved better
    embedded: List[GeneralVector] = []
    for tok in self.tokenizer_fn(values):
        try:
            tok_emb = self.embedding_fn(tok) * weight_mapping.get(tok, 1.0)
            embedded.append(tok_emb)
        except KeyError:
            self._unknown_token_counter += 1
            continue
    if len(embedded) == 0:
        return np.array([np.nan] * self.embedding_dim)
    emb: np.ndarray = np.mean(np.vstack(embedded), axis=0)
    return emb

TransformerTokenizedFrameEncoder

Bases: TokenizedFrameEncoder

Encode frames using pre-trained transformer.

See https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModel.from_pretrained for more information on pretrained models.

Parameters:

    pretrained_model_name_or_path (str): Transformer name or path. Default: 'bert-base-cased'
    max_length (int): Maximum number of tokens per row. Default: 128
    batch_size (int): Batch size for encoding. Default: 512

Examples:

>>> # doctest: +SKIP
>>> import pandas as pd

>>> from klinker.data import KlinkerPandasFrame
>>> from klinker.encoders import TransformerTokenizedFrameEncoder

>>> left = KlinkerPandasFrame.from_df(
         pd.DataFrame(
             [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
         ),
         table_name="A",
         id_col="id",
    ).set_index("id")
>>> right = KlinkerPandasFrame.from_df(
        pd.DataFrame(
            [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
        ),
        table_name="B",
        id_col="id",
    ).set_index("id")
>>> ttfe = TransformerTokenizedFrameEncoder(
        pretrained_model_name_or_path="bert-base-cased",
        max_length=10,
        batch_size=2
    )
>>> left_enc, right_enc = ttfe.encode(left=left, right=right)
Source code in klinker/encoders/pretrained.py
class TransformerTokenizedFrameEncoder(TokenizedFrameEncoder):
    """Encode frames using pre-trained transformer.

    See <https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModel.from_pretrained> for more information on pretrained models.

    Args:
        pretrained_model_name_or_path: str: Transformer name or path
        max_length: int: max number of tokens per row
        batch_size: int: size of batch for encoding

    Examples:

        >>> # doctest: +SKIP
        >>> import pandas as pd

        >>> from klinker.data import KlinkerPandasFrame
        >>> from klinker.encoders import TransformerTokenizedFrameEncoder

        >>> left = KlinkerPandasFrame.from_df(
                 pd.DataFrame(
                     [("a1", "John Doe"), ("a2", "Jane Doe")], columns=["id", "values"]
                 ),
                 table_name="A",
                 id_col="id",
            ).set_index("id")
        >>> right = KlinkerPandasFrame.from_df(
                pd.DataFrame(
                    [("b1", "Johnny Doe"), ("b2", "Jane Doe")], columns=["id", "values"]
                ),
                table_name="B",
                id_col="id",
            ).set_index("id")
        >>> ttfe = TransformerTokenizedFrameEncoder(
                pretrained_model_name_or_path="bert-base-cased",
                max_length=10,
                batch_size=2
            )
        >>> left_enc, right_enc = ttfe.encode(left=left, right=right)

    """

    def __init__(
        self,
        pretrained_model_name_or_path: str = "bert-base-cased",
        max_length: int = 128,
        batch_size: int = 512,
    ):
        if AutoModel is None:
            raise ImportError("Please install the transformers library!")
        self.model = AutoModel.from_pretrained(pretrained_model_name_or_path)
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.max_length = max_length
        self.batch_size = batch_size

    @property
    def tokenizer_fn(self) -> Callable[[str], List[str]]:
        return self.tokenizer.tokenize

    @torch.no_grad()
    def _encode_side(self, df: Frame) -> GeneralVector:
        encoded = []
        for batch in _batch_generator(df, self.batch_size):
            tok = self.tokenizer(
                list(batch),
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=self.max_length,
            )
            encoded.append(self.model(**tok).pooler_output.detach())
        return torch.vstack(encoded)

    def _encode(
        self,
        left: Frame,
        right: Frame,
        left_rel: Optional[Frame] = None,
        right_rel: Optional[Frame] = None,
    ) -> Tuple[GeneralVector, GeneralVector]:
        return self._encode_side(left), self._encode_side(right)

encode_frame(df, twe, weight_dict=None)

Encode Frame with tokenized word embedder.

Parameters:

    df (Frame): Frame to encode. Required.
    twe (TokenizedWordEmbedder): Tokenized word embedder. Required.
    weight_dict (Optional[Dict]): Token weights. Default: None

Returns:

    ndarray: embeddings

Source code in klinker/encoders/pretrained.py
def encode_frame(
    df: Frame, twe: TokenizedWordEmbedder, weight_dict: Optional[Dict] = None
) -> np.ndarray:
    """Encode Frame with tokenized word embedder.

    Args:
      df: Frame:
      twe: TokenizedWordEmbedder:
      weight_dict: Dict:  (Default value = None)

    Returns:
        embeddings
    """
    embeddings: np.ndarray = torch.nn.init.xavier_normal_(
        torch.empty(len(df), twe.embedding_dim)
    ).numpy()
    # TODO vectorize this?
    for idx, val in enumerate(df[df.columns[0]].values):
        if weight_dict:
            emb = twe.weighted_embed(val, weight_dict)
        else:
            emb = twe.embed(val)
        if not any(np.isnan(emb)):
            embeddings[idx] = emb
    return embeddings
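
A hedged usage sketch (a plain pandas DataFrame stands in for Frame here; only the first column is encoded, matching the function body above):

import pandas as pd

from klinker.encoders.pretrained import TokenizedWordEmbedder, encode_frame

df = pd.DataFrame({"values": ["John Doe", "Jane Doe"]})
twe = TokenizedWordEmbedder(embedding_fn="fasttext")

emb = encode_frame(df, twe=twe)                                    # shape: (len(df), twe.embedding_dim)
emb_weighted = encode_frame(df, twe=twe, weight_dict={"Doe": 0.2})
# Rows whose tokens are all unknown keep their Xavier-initialized random values.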