
Commit ed4f322

Add --network-mode option

Signed-off-by: Joshua Stone <[email protected]>

Parent: adc3bbe

7 files changed: +47 −1

docs/ramalama-bench.1.md (+3)

@@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
 #### **--help**, **-h**
 show this help message and exit
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Benchmark specified AI Model.

docs/ramalama-convert.1.md (+3)

@@ -25,6 +25,9 @@ type of OCI Model Image to convert.
 | car | Includes base image with the model stored in a /models subdir |
 | raw | Only the model and a link file model.file to it stored at / |
 
+#### **--network-mode**=*none*
+sets the configuration for network namespaces when handling RUN instructions
+
 ## EXAMPLE
 
 Generate an oci model out of an Ollama model.

docs/ramalama-run.1.md (+3)

@@ -53,6 +53,9 @@ llama.cpp explains this as:
 #### **--tls-verify**=*true*
 require HTTPS and verify certificates when contacting OCI registries
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Run specified AI Model as a chat bot. RamaLama pulls specified AI Model from
 registry if it does not exist in local storage. By default a prompt for a chat

docs/ramalama-serve.1.md (+3)

@@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
 #### **--name**, **-n**
 Name of the container to run the Model in.
 
+#### **--network-mode**=*default*
+set the network mode for the container
+
 #### **--port**, **-p**
 port for AI Model server to listen on

ramalama/cli.py (+33)

@@ -379,6 +379,12 @@ def bench_cli(args):
 
 def bench_parser(subparsers):
     parser = subparsers.add_parser("bench", aliases=["benchmark"], help="benchmark specified AI Model")
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=bench_cli)
 
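The same add_argument pattern repeats in the three hunks below with per-command defaults. One detail the diff relies on: argparse converts the dashed flag name into an underscored attribute, which is why ramalama/model.py and ramalama/oci.py further down read args.network_mode. A minimal standalone sketch of that behavior:

```python
import argparse

# Minimal sketch: argparse stores "--network-mode" as "network_mode"
# on the parsed namespace, matching what model.py and oci.py read.
parser = argparse.ArgumentParser()
parser.add_argument("--network-mode", type=str, default="none")

print(parser.parse_args([]).network_mode)                          # -> "none"
print(parser.parse_args(["--network-mode", "host"]).network_mode)  # -> "host"
```
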
@@ -600,6 +606,13 @@ def convert_parser(subparsers):
 Model "car" includes base image with the model stored in a /models subdir.
 Model "raw" contains the model and a link file model.file to it stored at /.""",
     )
+    # https://docs.podman.io/en/latest/markdown/podman-build.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="sets the configuration for network namespaces when handling RUN instructions",
+    )
     parser.add_argument("SOURCE")  # positional argument
     parser.add_argument("TARGET")  # positional argument
     parser.set_defaults(func=convert_cli)
@@ -717,6 +730,15 @@ def _run(parser):
 def run_parser(subparsers):
     parser = subparsers.add_parser("run", help="run specified AI Model as a chatbot")
     _run(parser)
+    # Disable network access by default, and give the option to pass any supported network mode into
+    # podman if needed:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.add_argument(
         "ARGS", nargs="*", help="Overrides the default prompt, and the output is returned without entering the chatbot"
@@ -742,6 +764,17 @@ def serve_parser(subparsers):
     parser.add_argument(
         "-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
     )
+    # --network-mode=default lets the container listen on localhost, and is an option that's compatible
+    # with podman and docker. It should use the bridge driver for rootful podman, the pasta driver for
+    # rootless podman, and the bridge driver for docker:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    # https://docs.docker.com/engine/network/#drivers
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="default",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=serve_cli)
 
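serve is the only subcommand whose default is a usable network mode; with --network=none the -p port mapping would have nothing to publish. A dry-run sketch of the contrast (the image name is a hypothetical placeholder, and the commands are printed rather than executed):

```python
# Hypothetical image name, used only for illustration.
IMAGE = "quay.io/example/model:latest"

# bench/run/convert default to an isolated container ...
bench_cmd = ["podman", "run", "--network=none", IMAGE]

# ... while serve needs networking so the published port is reachable.
# Per the comment above, "default" resolves to bridge (rootful podman),
# pasta (rootless podman), or bridge (docker).
serve_cmd = ["podman", "run", "--network=default", "-p", "8080:8080", IMAGE]

print(" ".join(bench_cmd))
print(" ".join(serve_cmd))
```
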
ramalama/model.py (+1)

@@ -153,6 +153,7 @@ def setup_container(self, args):
             "-i",
             "--label",
             "RAMALAMA",
+            f"--network={args.network_mode}",
             "--security-opt=label=disable",
             "--name",
             name,

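A condensed sketch of where that one added line ends up: the surrounding list is abridged from setup_container, and the namespace and container name here are illustrative stand-ins:

```python
from types import SimpleNamespace

# Illustrative stand-in for the parsed CLI namespace setup_container receives.
args = SimpleNamespace(network_mode="none")
name = "ramalama_demo"  # placeholder container name

conman_args = [
    "podman", "run",
    "-i",
    "--label", "RAMALAMA",
    f"--network={args.network_mode}",  # the single line this commit adds
    "--security-opt=label=disable",
    "--name", name,
]
print(conman_args)
```
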
ramalama/oci.py (+1 −1)

@@ -174,7 +174,7 @@ def build(self, source, target, args):
         else:
             c.write(model_raw)
         imageid = (
-            run_cmd([self.conman, "build", "--no-cache", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
+            run_cmd([self.conman, "build", "--no-cache", f"--network={args.network_mode}", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
             .stdout.decode("utf-8")
             .strip()
         )

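run_cmd is RamaLama's own helper; as a rough stand-in, a subprocess-based sketch shows the effect of the changed line, assuming podman or docker on PATH and a valid Containerfile in contextdir:

```python
import subprocess

def build_image(conman, containerfile, contextdir, network_mode="none", debug=False):
    """Rough stand-in for the run_cmd call in oci.py, not RamaLama's API.

    network_mode="none" mirrors the convert_parser default, so RUN
    instructions in the Containerfile execute without network access.
    """
    cmd = [
        conman, "build", "--no-cache",
        f"--network={network_mode}",
        "-q", "-f", containerfile, contextdir,
    ]
    if debug:
        print("+", " ".join(cmd))
    result = subprocess.run(cmd, check=True, capture_output=True)
    # With -q (quiet), the build prints only the image ID on stdout.
    return result.stdout.decode("utf-8").strip()
```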