# Example configuration file; it's safe to copy this as the default config file without any modification.

# You don't have to copy this file to your instance,
# just run `forgejo-runner generate-config > config.yaml` to generate a config file.

log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info
  # The level of logging for jobs, can be trace, debug, info, warn, error, fatal
  job_level: info

runner:
  # Where to store the registration result.
  file: .runner
  # How many tasks to execute concurrently.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs, read from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
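  # A minimal sketch of such a file, assuming the usual NAME=value dotenv format
  # (the variable names below are made up for illustration):
  # A_TEST_ENV_NAME_3=a_test_env_value_3
  # A_TEST_ENV_NAME_4=a_test_env_value_4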
  # The timeout for a job to be finished.
  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
  timeout: 3h
  # The timeout for the runner to wait for running jobs to finish when
  # shutting down because a TERM or INT signal has been received. Any
  # running jobs that haven't finished after this timeout will be
  # cancelled.
  # If unset or zero the jobs will be cancelled immediately.
  shutdown_timeout: 3h
  # Whether to skip verifying the TLS certificate of the instance.
  insecure: false
  # The timeout for fetching the job from the Forgejo instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Forgejo instance.
  fetch_interval: 2s
  # The interval for reporting the job status and logs to the Forgejo instance.
  report_interval: 1s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"]
  # If it's empty when registering, the runner will prompt you to enter labels.
  # If it's empty when executing the `daemon`, it will use the labels in the `.runner` file.
  labels: []
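  # As a sketch, reusing the values from the example above, a Docker-based runner
  # could be configured with:
  # labels:
  #   - "ubuntu-latest:docker://node:20-bookworm"
  #   - "ubuntu-22.04:docker://node:20-bookworm"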

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address the server listens on, but the address that job containers use to connect to it.
  # So 0.0.0.0 is a bad choice; leave it empty to detect it automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when `enabled` is true.
  # If it's specified, it will be used to set the ACTIONS_CACHE_URL environment variable. The URL should generally end with "/".
  # Otherwise it will be set to the URL of the internal cache server.
  external_server: ""
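  # For illustration only (the URL below is hypothetical):
  # external_server: "https://cache.example.com/"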

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, the runner will create a network automatically.
  network: ""
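  # For example, to attach job containers to the Docker host network stack instead, one could set:
  # network: host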
  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
  # Only takes effect if "network" is set to "".
  enable_ipv6: false
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # Other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
  options:
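  # A sketch reusing the value from the comment above (adjust to your setup):
  # options: --add-host=my.forgejo.url:host-gateway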
  # The parent directory of a job's working directory.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, to allow containers to mount only the `data` volume and all the json files in `/src`, change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If "-" or "", an available docker host will automatically be found.
  # If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
  # Otherwise the specified docker host will be used and an error will be returned if it doesn't work.
  docker_host: "-"
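  # As a sketch, assuming the standard local Docker socket mentioned above, an explicit host could look like:
  # docker_host: "unix:///var/run/docker.sock"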
  # Pull docker image(s) even if already present.
  force_pull: false

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent: