Support Llama2Tokenizer #375

Open · wants to merge 1 commit into base: main
1 change: 1 addition & 0 deletions megatron/arguments.py
@@ -1273,6 +1273,7 @@ def _add_data_args(parser):
'GPT2BPETokenizer',
'SentencePieceTokenizer',
'GPTSentencePieceTokenizer',
'Llama2Tokenizer',
'HFTokenizer',
'NullTokenizer'],
help='What type of tokenizer to use.')
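Once 'Llama2Tokenizer' is registered as a valid choice here, the new tokenizer is selected at launch by passing --tokenizer-type Llama2Tokenizer together with --tokenizer-model pointing at the Llama 2 SentencePiece model file; the matching branch in build_tokenizer is added below.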
55 changes: 55 additions & 0 deletions megatron/tokenizer/tokenizer.py
@@ -35,6 +35,9 @@ def build_tokenizer(args):
elif args.tokenizer_type == 'GPTSentencePieceTokenizer':
assert args.tokenizer_model is not None
tokenizer = _GPTSentencePieceTokenizer(args.tokenizer_model)
elif args.tokenizer_type == 'Llama2Tokenizer':
assert args.tokenizer_model is not None
tokenizer = _Llama2Tokenizer(args.tokenizer_model)
elif args.tokenizer_type == 'NullTokenizer':
assert args.vocab_size is not None
tokenizer = _NullTokenizer(args.vocab_size)
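For illustration, a minimal sketch of how the new branch resolves in isolation; the args object is a stand-in for Megatron's parsed arguments, the model path is a placeholder, and build_tokenizer's vocab-size padding step is omitted:

from types import SimpleNamespace
from megatron.tokenizer.tokenizer import _Llama2Tokenizer

# Stand-in for Megatron's parsed args; only the fields used by this branch are set.
args = SimpleNamespace(tokenizer_type='Llama2Tokenizer',
                       tokenizer_model='/path/to/llama2/tokenizer.model')  # placeholder path

if args.tokenizer_type == 'Llama2Tokenizer':
    assert args.tokenizer_model is not None
    tokenizer = _Llama2Tokenizer(args.tokenizer_model)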
@@ -465,6 +468,7 @@ def mask(self):
def additional_special_tokens_ids(self):
return [self.vocab[k] for k in self._t5_tokens]


class _GPTSentencePieceTokenizer(_SentencePieceTokenizer):
"""SentencePieceTokenizer-Megatron wrapper"""

@@ -504,6 +508,57 @@ def eod(self):
def additional_special_tokens_ids(self):
return None


class _Llama2Tokenizer(_SentencePieceTokenizer):
"""SentencePieceTokenizer-Megatron wrapper"""

def __init__(self, model_file):
super().__init__(model_file, vocab_extra_ids=0)

def _initalize(self, vocab_extra_ids):
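# NB: spelled to match _SentencePieceTokenizer._initalize, which this method overrides.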
self._populate_vocab()

# BOS / EOS token IDs
self.n_words: int = self.tokenizer.vocab_size()
self.bos_id: int = self.tokenizer.bos_id()
self.eos_id: int = self.tokenizer.eos_id()
self.pad_id: int = self.tokenizer.pad_id()
assert self.tokenizer.vocab_size() == self.tokenizer.get_piece_size()

def tokenize(self, s: str, bos=True, eos=False):
'''Default args are for text completion, not chat/dialog.'''
assert type(s) is str
t = self.tokenizer.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t

def detokenize(self, ids):
return self.tokenizer.decode_ids(ids)

@property
def cls(self):
return -1

@property
def sep(self):
return -1

@property
def mask(self):
return -1

@property
def eod(self):
return self.eos_id

@property
def additional_special_tokens_ids(self):
return None


class _NullTokenizer:
def __init__(self, vocab_size):
vocab_size = int(vocab_size)
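For reviewers, a minimal usage sketch of the new wrapper, assuming a local Llama 2 tokenizer.model is available (the path below is a placeholder):

from megatron.tokenizer.tokenizer import _Llama2Tokenizer

# Placeholder path to a Llama 2 SentencePiece model file.
tok = _Llama2Tokenizer('/path/to/llama2/tokenizer.model')

ids = tok.tokenize('Hello, world!')                # BOS is prepended by default
ids_eos = tok.tokenize('Hello, world!', eos=True)  # EOS appended on request
text = tok.detokenize(ids)                         # decode back to a string
assert tok.eod == tok.eos_id                       # eod maps to the EOS id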