[bug]: While generating multiple images via flux there is a delay in presenting the current last image. #8870

@T-Wittich

Description

Is there an existing issue for this problem?

  • I have searched the existing issues

Install method

Invoke's Launcher

Operating system

Windows

GPU vendor

Nvidia (CUDA)

GPU model

RTX4090

GPU VRAM

24GB

Version number

v6.11.1

Browser

No response

System Information

{
"version": "6.11.1",
"dependencies": {
"absl-py" : "2.4.0",
"accelerate" : "1.12.0",
"annotated-types" : "0.7.0",
"anyio" : "4.12.1",
"attrs" : "25.4.0",
"bidict" : "0.23.1",
"bitsandbytes" : "0.49.1",
"blake3" : "1.0.8",
"certifi" : "2022.12.7",
"cffi" : "2.0.0",
"charset-normalizer" : "2.1.1",
"click" : "8.3.1",
"colorama" : "0.4.6",
"coloredlogs" : "15.0.1",
"compel" : "2.1.1",
"contourpy" : "1.3.3",
"CUDA" : "12.8",
"cycler" : "0.12.1",
"Deprecated" : "1.3.1",
"diffusers" : "0.36.0",
"dnspython" : "2.8.0",
"dynamicprompts" : "0.31.0",
"einops" : "0.8.2",
"fastapi" : "0.118.3",
"fastapi-events" : "0.12.2",
"filelock" : "3.20.0",
"flatbuffers" : "25.12.19",
"fonttools" : "4.61.1",
"fsspec" : "2025.12.0",
"gguf" : "0.17.1",
"h11" : "0.16.0",
"httpcore" : "1.0.9",
"httptools" : "0.7.1",
"httpx" : "0.28.1",
"huggingface_hub" : "0.36.2",
"humanfriendly" : "10.0",
"idna" : "3.4",
"importlib_metadata" : "7.1.0",
"invisible-watermark" : "0.2.0",
"InvokeAI" : "6.11.1",
"jax" : "0.7.1",
"jaxlib" : "0.7.1",
"Jinja2" : "3.1.6",
"kiwisolver" : "1.4.9",
"MarkupSafe" : "2.1.5",
"matplotlib" : "3.10.8",
"mediapipe" : "0.10.14",
"ml_dtypes" : "0.5.4",
"mpmath" : "1.3.0",
"networkx" : "3.6.1",
"numpy" : "1.26.4",
"onnx" : "1.16.1",
"onnxruntime" : "1.19.2",
"opencv-contrib-python": "4.11.0.86",
"opencv-python" : "4.11.0.86",
"opt_einsum" : "3.4.0",
"packaging" : "24.1",
"picklescan" : "1.0.1",
"pillow" : "12.0.0",
"prompt_toolkit" : "3.0.52",
"protobuf" : "4.25.8",
"psutil" : "7.2.2",
"pycparser" : "3.0",
"pydantic" : "2.12.5",
"pydantic-settings" : "2.12.0",
"pydantic_core" : "2.41.5",
"pyparsing" : "3.3.2",
"PyPatchMatch" : "1.0.2",
"pyreadline3" : "3.5.4",
"python-dateutil" : "2.9.0.post0",
"python-dotenv" : "1.2.1",
"python-engineio" : "4.13.0",
"python-multipart" : "0.0.22",
"python-socketio" : "5.16.0",
"PyWavelets" : "1.9.0",
"PyYAML" : "6.0.3",
"regex" : "2026.1.15",
"requests" : "2.28.1",
"safetensors" : "0.7.0",
"scipy" : "1.17.0",
"semver" : "3.0.4",
"sentencepiece" : "0.2.0",
"setuptools" : "70.2.0",
"simple-websocket" : "1.1.0",
"six" : "1.17.0",
"sniffio" : "1.3.1",
"sounddevice" : "0.5.5",
"spandrel" : "0.4.1",
"starlette" : "0.48.0",
"sympy" : "1.14.0",
"tokenizers" : "0.22.2",
"torch" : "2.7.1+cu128",
"torchsde" : "0.2.6",
"torchvision" : "0.22.1+cu128",
"tqdm" : "4.66.5",
"trampoline" : "0.1.2",
"transformers" : "4.57.6",
"typing-inspection" : "0.4.2",
"typing_extensions" : "4.15.0",
"urllib3" : "1.26.13",
"uvicorn" : "0.40.0",
"watchfiles" : "1.1.1",
"wcwidth" : "0.6.0",
"websockets" : "16.0",
"wrapt" : "2.1.1",
"wsproto" : "1.3.2",
"zipp" : "3.19.2"
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "127.0.0.1",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": [""],
"allow_headers": ["
"],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": false,
"patchmatch": true,
"models_dir": "U:\models\checkpoints",
"convert_cache_dir": "models\.convert_cache",
"download_cache_dir": "models\.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "V:\InvokeAI",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": true,
"model_cache_keep_alive_min": 0,
"device_working_mem_gb": 3,
"enable_partial_loading": true,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": "backend:cudaMallocAsync",
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "auto",
"attention_slice_size": "auto",
"force_tiled_decode": false,
"pil_compress_level": 1,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "blake3_single",
"remote_api_tokens": [ {"url_regex": "huggingface.co", "token": "REDACTED"} ],
"scan_models_on_startup": false,
"unsafe_disable_picklescan": false,
"allow_unknown_models": true
},
"set_config_fields": [
"legacy_models_yaml_path", "outputs_dir", "log_memory_usage", "remote_api_tokens",
"pytorch_cuda_alloc_conf", "enable_partial_loading", "models_dir"
]
}


What happened

I generate multiple images with a flux model.
After some time an image appears in the output folder, but in the GUI it is still shown in an intermediate state and the running icon is displayed. This state persists for quite a long time before the next image generation starts.
But why? The image is already done; there is nothing left to do.
The problem also occurs for the last image in the sequence, so the delay cannot be part of the next image's generation process. The delay also appears to grow the slower the generation is.

What you expected to happen

I expect the GUI to show that an image is ready as soon as it is written to the output folder, and I assume the next image generation should start immediately thereafter.

How to reproduce the problem

Just generate more than one image with flux.
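
To make the delay measurable, here is a minimal watcher sketch (Python, standard library only; the output path and the "*.png" pattern are assumptions based on the "outputs_dir" value above, so adjust them to your setup). Run it alongside a batch and compare its timestamps with the moment the GUI marks each image as finished:

import time
from pathlib import Path

# Minimal repro helper (hypothetical, standard library only).
# OUTPUT_DIR and the "*.png" pattern are assumptions taken from the
# "outputs_dir" config value above; adjust to your setup.
OUTPUT_DIR = Path(r"V:\InvokeAI")

seen = {p.name for p in OUTPUT_DIR.glob("*.png")}
print(f"Watching {OUTPUT_DIR} - start a multi-image flux batch now.")

try:
    while True:
        current = {p.name for p in OUTPUT_DIR.glob("*.png")}
        for name in sorted(current - seen):
            # Timestamp the moment the finished file hits the disk;
            # compare this with when the GUI marks the image as done.
            print(f"{time.strftime('%H:%M:%S')}  new image: {name}")
        seen = current
        time.sleep(0.5)
except KeyboardInterrupt:
    pass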

Additional context

It happens every time with flux. It may also happen with other models such as SDXL, but there the delays would be too small to be noticeable.

Discord username

Samtiger.ger
