Make TorchTune Llama model KV cache compatible in eager #6643

Merged
60 commits merged on Nov 15, 2024
Commits
7f81e00  Changes to native runner to run tt (jackzhxng, Oct 9, 2024)
0b5a9a7  Add kwarg example inputs to eager model base (jackzhxng, Sep 30, 2024)
a9647d2  Create create new method for example kwarg inputs instead (jackzhxng, Oct 7, 2024)
fa3b1d2  Add kwarg example inputs to eager model base (jackzhxng, Sep 30, 2024)
e8715ba  Lint (jackzhxng, Oct 8, 2024)
a6f96a2  Accept model type parameter in export_llama (jackzhxng, Oct 5, 2024)
328c72c  Remove future implementation (jackzhxng, Oct 5, 2024)
ec80bba  Lint (jackzhxng, Oct 15, 2024)
c9bbe12  Create create new method for example kwarg inputs instead (jackzhxng, Oct 7, 2024)
99d5bfb  Accept model type parameter in export_llama (jackzhxng, Oct 5, 2024)
1fb2236  Torchtune llama3_2_vision model in ET, no quantization (jackzhxng, Oct 5, 2024)
e0c4b8a  Fix vision model example input (jackzhxng, Oct 8, 2024)
e145bd1  Lint (jackzhxng, Oct 22, 2024)
ed906cb  Kv cache (jackzhxng, Oct 25, 2024)
6dd47e7  Merge branch 'main' into jz/tt-llama (jackzhxng, Oct 25, 2024)
1825972  Update READMEs (jackzhxng, Oct 25, 2024)
196499a  Change model default arg (jackzhxng, Oct 25, 2024)
96ba40b  Update eager runner and eval llama (jackzhxng, Oct 25, 2024)
18a82e1  Merge branch 'jz/tt-llama-rebased' into jz/tt-llama-2 (jackzhxng, Oct 25, 2024)
0f3035d  Fix tests (jackzhxng, Oct 25, 2024)
e677e14  Merge branch 'jz/tt-llama-rebased' into jz/tt-llama-2 (jackzhxng, Oct 25, 2024)
b1f6678  Fix tests again (jackzhxng, Oct 28, 2024)
13d004b  Merge branch 'jz/tt-llama-rebased' into jz/tt-llama-2 (jackzhxng, Oct 28, 2024)
c79b773  Strict = True (jackzhxng, Oct 31, 2024)
b8ff8e2  Things work (jackzhxng, Oct 31, 2024)
25ec7ce  Merge branch 'jz/tt-llama-rebased' into jz/native-runner-tt (jackzhxng, Oct 31, 2024)
6e38763  Clip logits if torchtune (jackzhxng, Oct 31, 2024)
7a7041d  Merge branch 'jz/tt-llama-2' into jz/native-runner-tt (jackzhxng, Oct 31, 2024)
96d5798  Fix (jackzhxng, Oct 31, 2024)
f275e2e  Kv cache by default is false (jackzhxng, Nov 1, 2024)
37011d3  Clean up (jackzhxng, Nov 1, 2024)
7d52002  Export model with KV cache + runner for Torchtune models (jackzhxng, Nov 4, 2024)
e44b259  Export with no kv cache + non-strict load checkpoint (jackzhxng, Nov 6, 2024)
de45c48  Strict = True (jackzhxng, Oct 31, 2024)
2fe7bd8  Merge branch 'main' into jz/tt-llama-2 (jackzhxng, Nov 13, 2024)
64dcbda  Lint (jackzhxng, Nov 13, 2024)
a89d6b2  Fix merge (jackzhxng, Nov 13, 2024)
e1ec74c  Merge branch 'jz/tt-llama-2' into jz/native-runner-tt (jackzhxng, Nov 13, 2024)
84422d9  Fixes (jackzhxng, Nov 13, 2024)
1163769  Remove token count printing (jackzhxng, Nov 13, 2024)
a0e33d9  Merge branch 'jz/native-runner-tt' into jz/tt-llama-kv-cache (jackzhxng, Nov 13, 2024)
aa289ea  Fix faulty merge (jackzhxng, Nov 13, 2024)
eeeeb8a  Add runner (jackzhxng, Nov 13, 2024)
c80ce1c  Remove has_full_logits from llama runner (jackzhxng, Nov 13, 2024)
9bd405f  Lint (jackzhxng, Nov 13, 2024)
7507002  Modularize and update base eager runner (jackzhxng, Nov 13, 2024)
e5428de  Move to subdir (jackzhxng, Nov 14, 2024)
eefadaa  Merge branch 'jz/tt-llama-2' into jz/native-runner-tt (jackzhxng, Nov 14, 2024)
bf33485  Merge remote-tracking branch 'origin/main' into jz/tt-llama-2 (jackzhxng, Nov 14, 2024)
9c5647c  Merge branch 'jz/tt-llama-2' into jz/native-runner-tt (jackzhxng, Nov 14, 2024)
f61a347  Tarun rev (jackzhxng, Nov 14, 2024)
a36703e  Merge branch 'jz/native-runner-tt' into jz/tt-llama-kv-cache (jackzhxng, Nov 14, 2024)
7a0101f  Add automatically generated export tests (jackzhxng, Nov 14, 2024)
9777e23  Fix internal pyre warning (jackzhxng, Nov 14, 2024)
2b9f281  Merge branch 'jz/tt-llama-2' into jz/native-runner-tt (jackzhxng, Nov 14, 2024)
f504cc5  Merge branch 'jz/native-runner-tt' into jz/tt-llama-kv-cache (jackzhxng, Nov 14, 2024)
1e26f60  Add executorch runner (jackzhxng, Nov 15, 2024)
b74e2c3  Merge remote-tracking branch 'origin/main' into jz/tt-llama-kv-cache (jackzhxng, Nov 15, 2024)
f8f8f06  Fix test (jackzhxng, Nov 15, 2024)
09e9675  Lint (jackzhxng, Nov 15, 2024)
13 changes: 7 additions & 6 deletions examples/models/llama/runner/eager.py
@@ -6,14 +6,13 @@

import argparse
import json
from typing import Optional
from typing import Optional, Type

import torch

from executorch.examples.models.llama.export_llama_lib import (
_prepare_for_llama_export,
build_args_parser as _build_args_parser,
TORCHTUNE_DEFINED_MODELS,
)
from executorch.examples.models.llama.runner.generation import LlamaRunner
from executorch.extension.llm.export.builder import LLMEdgeManager
@@ -33,7 +32,6 @@ def __init__(self, args):
max_batch_size=1,
use_kv_cache=args.use_kv_cache,
vocab_size=params["vocab_size"],
has_full_logits=args.model in TORCHTUNE_DEFINED_MODELS,
device="cuda" if torch.cuda.is_available() else "cpu",
)
manager: LLMEdgeManager = _prepare_for_llama_export(args)
@@ -79,11 +77,10 @@ def build_args_parser() -> argparse.ArgumentParser:
return parser


def main() -> None:
def execute_runner(runner_class: Type[LlamaRunner]) -> None:
parser = build_args_parser()
args = parser.parse_args()

runner = EagerLlamaRunner(args)
runner = runner_class(args)
generated_tokens = (
runner.chat_completion(temperature=args.temperature)
if args.chat
@@ -97,5 +94,9 @@ def main() -> None:
print(f"Generated {len(generated_tokens)} tokens: {generated_tokens}")


def main() -> None:
execute_runner(EagerLlamaRunner)


if __name__ == "__main__":
main() # pragma: no cover
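The change above extracts the argument parsing and generation loop out of main() into execute_runner(), so other runners can reuse it; the new llama3_2_vision eager runner below is the first consumer. A minimal sketch of that reuse pattern, assuming a hypothetical LlamaRunner subclass:

```python
# Sketch only: MyEagerRunner is a hypothetical subclass used for illustration.
from executorch.examples.models.llama.runner.eager import execute_runner
from executorch.examples.models.llama.runner.generation import LlamaRunner


class MyEagerRunner(LlamaRunner):
    def __init__(self, args):
        # Build the model and call super().__init__(...) as EagerLlamaRunner does.
        ...

    def forward(self, tokens=None, input_pos=None):
        # Run the model and return logits.
        ...


def main() -> None:
    # execute_runner parses the shared CLI args, instantiates the runner class,
    # and drives chat or text completion exactly as the base main() did before.
    execute_runner(MyEagerRunner)
```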
15 changes: 3 additions & 12 deletions examples/models/llama/runner/generation.py
@@ -53,7 +53,6 @@
max_batch_size: int,
use_kv_cache: bool,
vocab_size: int,
has_full_logits: bool = False,
device: str = "cpu",
):
"""
@@ -65,14 +64,12 @@
max_batch_size: max batch size.
use_kv_cache: whether to use a KV cache.
vocab_size: number of items in the vocab.
has_full_logits: whether the model returns the full logits or only returns the last logit.
device: device to run the runner on.
"""
self.max_seq_len = max_seq_len
self.max_batch_size = max_batch_size
self.use_kv_cache = use_kv_cache
self.tokenizer = get_tokenizer(tokenizer_path)
self.has_full_logits = has_full_logits
self.device = device
assert vocab_size == self.tokenizer.n_words

@@ -93,7 +90,7 @@ def generate( # noqa: C901
echo: bool = False,
pos_base: int = 0,
) -> List[int]:
# prefill
# Prefill
logits = self.forward(
tokens=torch.tensor([prompt_tokens], dtype=torch.long, device=self.device),
input_pos=(
@@ -103,10 +100,7 @@
),
)

if self.has_full_logits:
current_token = next_token(logits[:, -1, :], temperature, top_p)
else:
current_token = next_token(logits, temperature, top_p)
current_token = next_token(logits, temperature, top_p)
print(f"{self.tokenizer.decode_token(current_token)}", end="", flush=True)
tokens = prompt_tokens + [current_token]

@@ -128,10 +122,7 @@
)

# If the logits aren't already clipped to only contain the last logit, clip them.
if self.has_full_logits:
current_token = next_token(logits[:, -1, :], temperature, top_p)
else:
current_token = next_token(logits, temperature, top_p)
current_token = next_token(logits, temperature, top_p)
tokens.append(current_token)

if current_token == self.tokenizer.eos_id or (
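With has_full_logits gone, the base runner assumes the exported model already returns only the final position's logits, while TorchTune-defined models (which emit logits for every position) now do the slicing inside their own runner subclass (see TorchTuneLlamaRunner below). A small illustration of that shape contract, using made-up dimensions:

```python
# Illustration only: the logits shapes assumed on either side of this change.
import torch

vocab_size = 32_000

# TorchTune-style models return logits for the whole sequence...
full_logits = torch.randn(1, 7, vocab_size)   # [batch, seq_len, vocab]
# ...so TorchTuneLlamaRunner clips to the last position itself:
last_logits = full_logits[:, -1, :]           # [1, vocab_size]

# The base LlamaRunner now passes logits straight to next_token, assuming the
# model already returns this [batch, vocab] shape.
assert last_logits.shape == (1, vocab_size)
```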
1 change: 0 additions & 1 deletion examples/models/llama/runner/native.py
@@ -41,7 +41,6 @@ def __init__(self, args):
max_batch_size=1,
use_kv_cache=args.kv_cache,
vocab_size=params["vocab_size"],
has_full_logits=args.model in TORCHTUNE_DEFINED_MODELS,
)
self.model = _load_for_executorch(args.pte)

53 changes: 53 additions & 0 deletions examples/models/llama3_2_vision/runner/eager.py
@@ -0,0 +1,53 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import json
from typing import Optional

import torch

from executorch.examples.models.llama.export_llama_lib import _prepare_for_llama_export
from executorch.examples.models.llama.runner.eager import execute_runner
from executorch.examples.models.llama3_2_vision.runner.generation import (
TorchTuneLlamaRunner,
)
from executorch.extension.llm.export import LLMEdgeManager


class EagerLlamaRunner(TorchTuneLlamaRunner):
"""
Runs llama in eager mode with provided checkpoint file.
"""

def __init__(self, args):
with open(args.params, "r") as f:
params = json.loads(f.read())
super().__init__(
tokenizer_path=args.tokenizer_path,
max_seq_len=args.max_seq_length,
max_batch_size=1,
use_kv_cache=args.use_kv_cache,
vocab_size=params["vocab_size"],
device="cuda" if torch.cuda.is_available() else "cpu",
)
manager: LLMEdgeManager = _prepare_for_llama_export(args)
self.model = manager.model.eval().to(device=self.device)

def forward(
self,
tokens: Optional[torch.LongTensor] = None,
input_pos: Optional[torch.LongTensor] = None,
mask: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
return self.model.forward(tokens=tokens, input_pos=input_pos, mask=mask)


def main() -> None:
execute_runner(EagerLlamaRunner)


if __name__ == "__main__":
main() # pragma: no cover
101 changes: 101 additions & 0 deletions examples/models/llama3_2_vision/runner/generation.py
@@ -0,0 +1,101 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import List

import torch
from executorch.examples.models.llama.runner.generation import LlamaRunner, next_token


class TorchTuneLlamaRunner(LlamaRunner):
def __init__(
self,
tokenizer_path: str,
max_seq_len: int,
max_batch_size: int,
use_kv_cache: bool,
vocab_size: int,
device: str = "cpu",
):
super().__init__(
tokenizer_path,
max_seq_len,
max_batch_size,
use_kv_cache,
vocab_size,
device,
)

self.causal_mask = torch.tril(
torch.ones(
size=(max_seq_len, max_seq_len),
dtype=torch.bool,
)
)
self.input_pos = torch.arange(max_seq_len)

def generate( # noqa: C901
self,
prompt_tokens: List[int],
max_seq_len: int,
temperature: float = 0.8,
top_p: float = 0.9,
echo: bool = False,
) -> List[int]:
# Prefill
seq_len = len(prompt_tokens)
input_pos = self.input_pos[None, :seq_len]
mask = self.causal_mask[None, :seq_len]
if self.use_kv_cache:
logits = self.forward(
tokens=torch.tensor(
[prompt_tokens], dtype=torch.long, device=self.device
),
input_pos=input_pos,
mask=mask,
)
else:
logits = self.forward(
tokens=torch.tensor(
[prompt_tokens], dtype=torch.long, device=self.device
),
)

# Only need the last logit.
current_token = next_token(logits[:, -1, :], temperature, top_p)
print(f"{self.tokenizer.decode_token(current_token)}", end="", flush=True)
tokens = prompt_tokens + [current_token]

while len(tokens) < max_seq_len:
mask = self.causal_mask[None, seq_len, None, :]
input_pos = self.input_pos[None, seq_len, None]
if self.use_kv_cache:
logits = self.forward(
tokens=torch.tensor(
[[current_token]], dtype=torch.long, device=self.device
),
input_pos=input_pos,
mask=mask,
)
else:
logits = self.forward(
tokens=torch.tensor([tokens], dtype=torch.long, device=self.device),
)

# Only need the last logit.
current_token = next_token(logits[:, -1, :], temperature, top_p)
tokens.append(current_token)

if current_token == self.tokenizer.eos_id or (
hasattr(self.tokenizer, "stop_tokens")
and current_token in self.tokenizer.stop_tokens
):
break

print(f"{self.tokenizer.decode_token(current_token)}", end="", flush=True)
seq_len += 1

return tokens if echo else tokens[len(prompt_tokens) :]
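The runner above drives the model with an explicit position tensor and a slice of a precomputed causal mask: the whole prompt in one prefill call, then one token per decode step. A standalone sketch of that bookkeeping with small, made-up sizes:

```python
# Illustration of the mask/position slicing used in TorchTuneLlamaRunner, with
# max_seq_len=8 and a 3-token prompt; the numbers are not from the PR.
import torch

max_seq_len = 8
causal_mask = torch.tril(torch.ones(max_seq_len, max_seq_len, dtype=torch.bool))
input_pos = torch.arange(max_seq_len)

# Prefill: attend over all prompt positions at once.
seq_len = 3
prefill_mask = causal_mask[None, :seq_len]          # [1, 3, 8]
prefill_pos = input_pos[None, :seq_len]             # [1, 3]

# Decode: a single new token at position seq_len, masked against the full cache.
decode_mask = causal_mask[None, seq_len, None, :]   # [1, 1, 8]
decode_pos = input_pos[None, seq_len, None]         # [1, 1]

print(prefill_mask.shape, prefill_pos.shape)  # torch.Size([1, 3, 8]) torch.Size([1, 3])
print(decode_mask.shape, decode_pos.shape)    # torch.Size([1, 1, 8]) torch.Size([1, 1])
```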
131 changes: 131 additions & 0 deletions examples/models/llama3_2_vision/runner/native.py
@@ -0,0 +1,131 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import json
from typing import Optional

import torch

from executorch.examples.models.llama.export_llama_lib import (
EXECUTORCH_DEFINED_MODELS,
TORCHTUNE_DEFINED_MODELS,
)
from executorch.examples.models.llama3_2_vision.runner.generation import (
TorchTuneLlamaRunner,
)

from executorch.extension.pybindings.portable_lib import _load_for_executorch

# Load custom ops and quantized ops.
from executorch.extension.pybindings import portable_lib # noqa # usort: skip

# Note: import this after portable_lib
from executorch.extension.llm.custom_ops import sdpa_with_kv_cache # noqa # usort: skip
from executorch.kernels import quantized # noqa


class NativeLlamaRunner(TorchTuneLlamaRunner):
"""
Runs llama via ExecuTorch with provided pte file.
"""

def __init__(self, args):
with open(args.params, "r") as f:
params = json.loads(f.read())
super().__init__(
tokenizer_path=args.tokenizer,
max_seq_len=args.max_len,
max_batch_size=1,
use_kv_cache=args.kv_cache,
vocab_size=params["vocab_size"],
)
self.model = _load_for_executorch(args.pte)
self.use_kv_cache = args.kv_cache

def forward(
self,
tokens: torch.Tensor,
input_pos: Optional[torch.Tensor] = None,
mask: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
return (
self.model.forward((tokens, input_pos, mask))
if self.use_kv_cache
else self.model.forward((tokens,))
)[0]


def build_args_parser() -> argparse.ArgumentParser:
# TODO: merge these with build_args_parser from export_llama_lib.
parser = argparse.ArgumentParser()

parser.add_argument(
"--model",
default="llama3",
choices=EXECUTORCH_DEFINED_MODELS + TORCHTUNE_DEFINED_MODELS,
)

parser.add_argument(
"-f",
"--pte",
type=str,
default=None,
help="path to exported executorch .pte file",
)

parser.add_argument(
"-p", "--params", type=str, default=None, help="model params file"
)

parser.add_argument(
"-t",
"--tokenizer",
type=str,
default=None,
)

parser.add_argument(
"--prompt",
type=str,
default="Hello",
)

parser.add_argument(
"--temperature",
type=float,
default=0.6,
)

parser.add_argument(
"-kv",
"--kv_cache",
action="store_true",
)

parser.add_argument(
"--max_len",
type=int,
default=128,
help="Maximum length of the generated response sequence.",
)

return parser


def main() -> None:
parser = build_args_parser()
args = parser.parse_args()
runner = NativeLlamaRunner(args)
generated_tokens = runner.text_completion(
prompt=args.prompt,
temperature=args.temperature,
)
print(f"Response: {generated_tokens}")


if __name__ == "__main__":
main() # pragma: no cover
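For reference, a hedged sketch of driving the new native runner programmatically, mirroring its main(); the model name, file paths, and prompt are placeholders rather than values taken from this PR:

```python
# Hypothetical usage sketch; all paths and the --model value are assumptions.
from executorch.examples.models.llama3_2_vision.runner.native import (
    NativeLlamaRunner,
    build_args_parser,
)

args = build_args_parser().parse_args(
    [
        "--model", "llama3_2_vision",     # assumed TorchTune-defined model name
        "--pte", "llama3_2_vision.pte",   # exported ExecuTorch program
        "--params", "params.json",        # model params file
        "--tokenizer", "tokenizer.model",
        "--prompt", "What is the capital of France?",
        "--kv_cache",                     # exercise the KV-cache forward path
        "--max_len", "64",
    ]
)
runner = NativeLlamaRunner(args)
tokens = runner.text_completion(prompt=args.prompt, temperature=args.temperature)
print(f"Response: {tokens}")
```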