From 56e660dfdef3491875050293281b0c9a4e430b73 Mon Sep 17 00:00:00 2001
From: Kohya S <52813779+kohya-ss@users.noreply.github.com>
Date: Thu, 12 Feb 2026 21:35:02 +0900
Subject: [PATCH] feat: add process_escape function to handle escape sequences
 in prompts

Decode literal backslash escapes (e.g. "\n" typed on the command line)
in the positive and negative prompts before text encoding. The
latin-1/backslashreplace round-trip is used instead of a plain
utf-8 encode, because decoding utf-8 bytes with "unicode_escape"
(which interprets input as latin-1) would mojibake any non-ASCII
characters in the prompt.
---
 anima_minimal_inference.py | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/anima_minimal_inference.py b/anima_minimal_inference.py
index 1865e3f3..65fc0d01 100644
--- a/anima_minimal_inference.py
+++ b/anima_minimal_inference.py
@@ -305,6 +305,16 @@ def decode_latent(
     logger.info(f"Decoded. Pixel shape {pixels.shape}")
     return pixels[0]  # remove batch dimension
 
+def process_escape(text: str) -> str:
+    """Decode backslash escape sequences (e.g. "\\n" -> newline) in text.
+
+    Args:
+        text: Input text possibly containing literal escape sequences.
+
+    Returns:
+        str: Text with escapes decoded; non-ASCII characters preserved.
+    """
+    return text.encode("latin-1", "backslashreplace").decode("unicode_escape")
 
 def prepare_text_inputs(
     args: argparse.Namespace, device: torch.device, anima: anima_models.Anima, shared_models: Optional[Dict] = None
@@ -354,7 +364,7 @@ def prepare_text_inputs(
 
     logger.info("Encoding prompt with Text Encoder")
 
-    prompt = args.prompt
+    prompt = process_escape(args.prompt)
     cache_key = prompt
     if cache_key in conds_cache:
         embed = conds_cache[cache_key]
@@ -380,13 +390,16 @@ def prepare_text_inputs(
 
         conds_cache[cache_key] = embed
 
-    negative_prompt = args.negative_prompt
+    negative_prompt = process_escape(args.negative_prompt)
     cache_key = negative_prompt
     if cache_key in conds_cache:
         negative_embed = conds_cache[cache_key]
     else:
         move_models_to_device_if_needed()
 
+        tokenize_strategy = strategy_base.TokenizeStrategy.get_strategy()
+        encoding_strategy = strategy_base.TextEncodingStrategy.get_strategy()
+
         with torch.no_grad():
             # negative_embed = anima_text_encoder.get_text_embeds(anima, tokenizer, text_encoder, t5xxl_tokenizer, negative_prompt)
             tokens = tokenize_strategy.tokenize(negative_prompt)