// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2019 nektos
// SPDX-License-Identifier: MIT

package cmd
import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/joho/godotenv"
	"github.com/nektos/act/pkg/artifactcache"
	"github.com/nektos/act/pkg/artifacts"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"golang.org/x/term"
)
type executeArgs struct {
runList bool
job string
event string
workdir string
workflowsPath string
noWorkflowRecurse bool
autodetectEvent bool
forcePull bool
forceRebuild bool
jsonLogger bool
envs [ ] string
envfile string
secrets [ ] string
defaultActionsUrl string
insecureSecrets bool
privileged bool
usernsMode string
containerArchitecture string
containerDaemonSocket string
useGitIgnore bool
containerCapAdd [ ] string
containerCapDrop [ ] string
2023-04-11 02:58:12 +00:00
containerOptions string
2023-03-08 02:55:31 +00:00
artifactServerPath string
2023-03-17 01:45:46 +00:00
artifactServerAddr string
2023-03-08 02:55:31 +00:00
artifactServerPort string
noSkipCheckout bool
debug bool
dryrun bool
2023-03-29 01:42:53 +00:00
image string
2023-03-08 02:55:31 +00:00
cacheHandler * artifactcache . Handler
2023-05-18 07:01:43 +00:00
network string
2023-03-08 02:55:31 +00:00
}
// WorkflowsPath returns the resolved path to the workflow file(s),
// made absolute relative to the configured working directory.
func (i *executeArgs) WorkflowsPath() string {
	wfPath := i.workflowsPath
	return i.resolve(wfPath)
}
// Envfile returns the resolved path to the dotenv (.env) file,
// made absolute relative to the configured working directory.
func (i *executeArgs) Envfile() string {
	envPath := i.envfile
	return i.resolve(envPath)
}
// LoadSecrets builds the secret map from the --secret flags. Secret names are
// upper-cased (secrets are case-insensitive). A value is taken, in order of
// preference, from the flag itself (NAME=VALUE), the environment variable of
// the same name, or an interactive password prompt on stdin.
func (i *executeArgs) LoadSecrets() map[string]string {
	s := make(map[string]string)
	for _, secretPair := range i.secrets {
		secretPairParts := strings.SplitN(secretPair, "=", 2)
		secretPairParts[0] = strings.ToUpper(secretPairParts[0])
		// Presence check: the previous comparison against the stored *value*
		// (strings.ToUpper(s[key]) == key) never detected real duplicates.
		if _, ok := s[secretPairParts[0]]; ok {
			log.Errorf("Secret %s is already defined (secrets are case insensitive)", secretPairParts[0])
		}
		if len(secretPairParts) == 2 {
			s[secretPairParts[0]] = secretPairParts[1]
		} else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" {
			s[secretPairParts[0]] = env
		} else {
			fmt.Printf("Provide value for '%s': ", secretPairParts[0])
			val, err := term.ReadPassword(int(os.Stdin.Fd()))
			fmt.Println()
			if err != nil {
				log.Errorf("failed to read input: %v", err)
				os.Exit(1)
			}
			s[secretPairParts[0]] = string(val)
		}
	}
	return s
}
// readEnvs merges the key/value pairs from the dotenv file at path into envs.
// It reports whether the file existed and was loaded; a file that exists but
// cannot be parsed is fatal.
func readEnvs(path string, envs map[string]string) bool {
	if _, err := os.Stat(path); err != nil {
		// Missing (or unreadable) file: nothing to merge.
		return false
	}
	loaded, err := godotenv.Read(path)
	if err != nil {
		log.Fatalf("Error loading from %s: %v", path, err)
	}
	for key, value := range loaded {
		envs[key] = value
	}
	return true
}
// LoadEnvs assembles the container environment from the --env flags and the
// env file, then injects the local cache server URL as ACTIONS_CACHE_URL.
func (i *executeArgs) LoadEnvs() map[string]string {
	envs := map[string]string{}

	for _, pair := range i.envs {
		parts := strings.SplitN(pair, `=`, 2)
		value := ""
		if len(parts) == 2 {
			value = parts[1]
		}
		envs[parts[0]] = value
	}

	// Best-effort: a missing env file is fine.
	_ = readEnvs(i.Envfile(), envs)

	envs["ACTIONS_CACHE_URL"] = i.cacheHandler.ExternalURL() + "/"

	return envs
}
// Workdir returns the absolute path of the working directory.
func (i *executeArgs) Workdir() string {
	const here = "."
	return i.resolve(here)
}
// resolve makes path absolute by joining it onto the working directory.
// An empty path is returned unchanged; an already-absolute path is kept as-is.
func (i *executeArgs) resolve(path string) string {
	base, err := filepath.Abs(i.workdir)
	if err != nil {
		log.Fatal(err)
	}
	switch {
	case path == "":
		return path
	case filepath.IsAbs(path):
		return path
	default:
		return filepath.Join(base, path)
	}
}
// printList renders the jobs of plan as a left-aligned table on stdout
// (stage, job ID, job name, workflow name, workflow file, events). If two
// jobs share an ID, a hint about disambiguating with `-W` is appended.
// It always returns nil.
func printList(plan *model.Plan) error {
	type row struct {
		jobID   string
		jobName string
		stage   string
		wfName  string
		wfFile  string
		events  string
	}

	header := row{
		jobID:   "Job ID",
		jobName: "Job name",
		stage:   "Stage",
		wfName:  "Workflow name",
		wfFile:  "Workflow file",
		events:  "Events",
	}

	// widen grows a column width to fit s, if needed.
	widen := func(current int, s string) int {
		if len(s) > current {
			return len(s)
		}
		return current
	}

	rows := []row{}
	seen := map[string]bool{}
	duplicateJobIDs := false

	idW := len(header.jobID)
	nameW := len(header.jobName)
	stageW := len(header.stage)
	wfNameW := len(header.wfName)
	wfFileW := len(header.wfFile)
	eventsW := len(header.events)

	for stageIdx, stage := range plan.Stages {
		for _, r := range stage.Runs {
			line := row{
				jobID:   r.JobID,
				jobName: r.String(),
				stage:   strconv.Itoa(stageIdx),
				wfName:  r.Workflow.Name,
				wfFile:  r.Workflow.File,
				events:  strings.Join(r.Workflow.On(), `,`),
			}

			if seen[line.jobID] {
				duplicateJobIDs = true
			}
			seen[line.jobID] = true
			rows = append(rows, line)

			idW = widen(idW, line.jobID)
			nameW = widen(nameW, line.jobName)
			stageW = widen(stageW, line.stage)
			wfNameW = widen(wfNameW, line.wfName)
			wfFileW = widen(wfFileW, line.wfFile)
			eventsW = widen(eventsW, line.events)
		}
	}

	// Two spaces of padding between columns; the last column needs none.
	idW += 2
	nameW += 2
	stageW += 2
	wfNameW += 2
	wfFileW += 2

	// Negative widths left-justify each field.
	printRow := func(r row) {
		fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
			-stageW, r.stage,
			-idW, r.jobID,
			-nameW, r.jobName,
			-wfNameW, r.wfName,
			-wfFileW, r.wfFile,
			-eventsW, r.events,
		)
	}

	printRow(header)
	for _, line := range rows {
		printRow(line)
	}

	if duplicateJobIDs {
		fmt.Print("\nDetected multiple jobs with the same job name, use `-W` to specify the path to the specific workflow.\n")
	}
	return nil
}
func runExecList ( ctx context . Context , planner model . WorkflowPlanner , execArgs * executeArgs ) error {
// plan with filtered jobs - to be used for filtering only
var filterPlan * model . Plan
// Determine the event name to be filtered
var filterEventName string = ""
if len ( execArgs . event ) > 0 {
log . Infof ( "Using chosed event for filtering: %s" , execArgs . event )
filterEventName = execArgs . event
} else if execArgs . autodetectEvent {
// collect all events from loaded workflows
events := planner . GetEvents ( )
// set default event type to first event from many available
// this way user dont have to specify the event.
log . Infof ( "Using first detected workflow event for filtering: %s" , events [ 0 ] )
filterEventName = events [ 0 ]
}
2023-03-17 01:45:46 +00:00
var err error
2023-03-08 02:55:31 +00:00
if execArgs . job != "" {
log . Infof ( "Preparing plan with a job: %s" , execArgs . job )
2023-03-17 01:45:46 +00:00
filterPlan , err = planner . PlanJob ( execArgs . job )
if err != nil {
return err
}
2023-03-08 02:55:31 +00:00
} else if filterEventName != "" {
log . Infof ( "Preparing plan for a event: %s" , filterEventName )
2023-03-17 01:45:46 +00:00
filterPlan , err = planner . PlanEvent ( filterEventName )
if err != nil {
return err
}
2023-03-08 02:55:31 +00:00
} else {
log . Infof ( "Preparing plan with all jobs" )
2023-03-17 01:45:46 +00:00
filterPlan , err = planner . PlanAll ( )
if err != nil {
return err
}
2023-03-08 02:55:31 +00:00
}
printList ( filterPlan )
return nil
}
func runExec ( ctx context . Context , execArgs * executeArgs ) func ( cmd * cobra . Command , args [ ] string ) error {
return func ( cmd * cobra . Command , args [ ] string ) error {
planner , err := model . NewWorkflowPlanner ( execArgs . WorkflowsPath ( ) , execArgs . noWorkflowRecurse )
if err != nil {
return err
}
if execArgs . runList {
return runExecList ( ctx , planner , execArgs )
}
// plan with triggered jobs
var plan * model . Plan
// Determine the event name to be triggered
var eventName string
// collect all events from loaded workflows
events := planner . GetEvents ( )
if len ( execArgs . event ) > 0 {
log . Infof ( "Using chosed event for filtering: %s" , execArgs . event )
2023-05-06 03:27:08 +00:00
eventName = execArgs . event
2023-03-08 02:55:31 +00:00
} else if len ( events ) == 1 && len ( events [ 0 ] ) > 0 {
log . Infof ( "Using the only detected workflow event: %s" , events [ 0 ] )
eventName = events [ 0 ]
} else if execArgs . autodetectEvent && len ( events ) > 0 && len ( events [ 0 ] ) > 0 {
// set default event type to first event from many available
// this way user dont have to specify the event.
log . Infof ( "Using first detected workflow event: %s" , events [ 0 ] )
eventName = events [ 0 ]
} else {
log . Infof ( "Using default workflow event: push" )
eventName = "push"
}
// build the plan for this run
if execArgs . job != "" {
log . Infof ( "Planning job: %s" , execArgs . job )
2023-03-17 01:45:46 +00:00
plan , err = planner . PlanJob ( execArgs . job )
if err != nil {
return err
}
2023-03-08 02:55:31 +00:00
} else {
log . Infof ( "Planning jobs for event: %s" , eventName )
2023-03-17 01:45:46 +00:00
plan , err = planner . PlanEvent ( eventName )
if err != nil {
return err
}
2023-03-08 02:55:31 +00:00
}
maxLifetime := 3 * time . Hour
if deadline , ok := ctx . Deadline ( ) ; ok {
maxLifetime = time . Until ( deadline )
}
// init a cache server
2023-05-04 10:45:01 +00:00
handler , err := artifactcache . StartHandler ( "" , "" , 0 , log . StandardLogger ( ) . WithField ( "module" , "cache_request" ) )
2023-03-08 02:55:31 +00:00
if err != nil {
return err
}
log . Infof ( "cache handler listens on: %v" , handler . ExternalURL ( ) )
execArgs . cacheHandler = handler
2023-06-05 08:51:44 +00:00
if len ( execArgs . artifactServerAddr ) == 0 {
if ip := common . GetOutboundIP ( ) ; ip == nil {
return fmt . Errorf ( "unable to determine outbound IP address" )
} else {
execArgs . artifactServerAddr = ip . String ( )
}
}
if len ( execArgs . artifactServerPath ) == 0 {
tempDir , err := os . MkdirTemp ( "" , "gitea-act-" )
if err != nil {
fmt . Println ( err )
}
defer os . RemoveAll ( tempDir )
execArgs . artifactServerPath = tempDir
}
2023-03-08 02:55:31 +00:00
// run the plan
config := & runner . Config {
Workdir : execArgs . Workdir ( ) ,
BindWorkdir : false ,
ReuseContainers : false ,
ForcePull : execArgs . forcePull ,
ForceRebuild : execArgs . forceRebuild ,
LogOutput : true ,
JSONLogger : execArgs . jsonLogger ,
Env : execArgs . LoadEnvs ( ) ,
Secrets : execArgs . LoadSecrets ( ) ,
InsecureSecrets : execArgs . insecureSecrets ,
Privileged : execArgs . privileged ,
UsernsMode : execArgs . usernsMode ,
ContainerArchitecture : execArgs . containerArchitecture ,
ContainerDaemonSocket : execArgs . containerDaemonSocket ,
UseGitIgnore : execArgs . useGitIgnore ,
// GitHubInstance: t.client.Address(),
ContainerCapAdd : execArgs . containerCapAdd ,
ContainerCapDrop : execArgs . containerCapDrop ,
2023-04-11 02:58:12 +00:00
ContainerOptions : execArgs . containerOptions ,
2023-03-08 02:55:31 +00:00
AutoRemove : true ,
ArtifactServerPath : execArgs . artifactServerPath ,
ArtifactServerPort : execArgs . artifactServerPort ,
2023-06-05 08:51:44 +00:00
ArtifactServerAddr : execArgs . artifactServerAddr ,
2023-03-08 02:55:31 +00:00
NoSkipCheckout : execArgs . noSkipCheckout ,
// PresetGitHubContext: preset,
// EventJSON: string(eventJSON),
ContainerNamePrefix : fmt . Sprintf ( "GITEA-ACTIONS-TASK-%s" , eventName ) ,
ContainerMaxLifetime : maxLifetime ,
2023-05-18 07:01:43 +00:00
ContainerNetworkMode : container . NetworkMode ( execArgs . network ) ,
2023-03-08 02:55:31 +00:00
DefaultActionInstance : execArgs . defaultActionsUrl ,
PlatformPicker : func ( _ [ ] string ) string {
2023-03-29 01:42:53 +00:00
return execArgs . image
2023-03-08 02:55:31 +00:00
} ,
}
2023-05-06 03:27:08 +00:00
if ! execArgs . debug {
logLevel := log . Level ( log . InfoLevel )
config . JobLoggerLevel = & logLevel
}
2023-03-08 02:55:31 +00:00
r , err := runner . New ( config )
if err != nil {
return err
}
2023-03-17 01:45:46 +00:00
artifactCancel := artifacts . Serve ( ctx , execArgs . artifactServerPath , execArgs . artifactServerAddr , execArgs . artifactServerPort )
2023-03-08 02:55:31 +00:00
log . Debugf ( "artifacts server started at %s:%s" , execArgs . artifactServerPath , execArgs . artifactServerPort )
ctx = common . WithDryrun ( ctx , execArgs . dryrun )
executor := r . NewPlanExecutor ( plan ) . Finally ( func ( ctx context . Context ) error {
artifactCancel ( )
return nil
} )
return executor ( ctx )
}
}
func loadExecCmd ( ctx context . Context ) * cobra . Command {
execArg := executeArgs { }
execCmd := & cobra . Command {
Use : "exec" ,
Short : "Run workflow locally." ,
Args : cobra . MaximumNArgs ( 20 ) ,
RunE : runExec ( ctx , & execArg ) ,
}
execCmd . Flags ( ) . BoolVarP ( & execArg . runList , "list" , "l" , false , "list workflows" )
execCmd . Flags ( ) . StringVarP ( & execArg . job , "job" , "j" , "" , "run a specific job ID" )
execCmd . Flags ( ) . StringVarP ( & execArg . event , "event" , "E" , "" , "run a event name" )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . workflowsPath , "workflows" , "W" , "./.gitea/workflows/" , "path to workflow file(s)" )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . workdir , "directory" , "C" , "." , "working directory" )
execCmd . PersistentFlags ( ) . BoolVarP ( & execArg . noWorkflowRecurse , "no-recurse" , "" , false , "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag" )
execCmd . Flags ( ) . BoolVarP ( & execArg . autodetectEvent , "detect-event" , "" , false , "Use first event type from workflow as event that triggered the workflow" )
execCmd . Flags ( ) . BoolVarP ( & execArg . forcePull , "pull" , "p" , false , "pull docker image(s) even if already present" )
execCmd . Flags ( ) . BoolVarP ( & execArg . forceRebuild , "rebuild" , "" , false , "rebuild local action docker image(s) even if already present" )
execCmd . PersistentFlags ( ) . BoolVar ( & execArg . jsonLogger , "json" , false , "Output logs in json format" )
execCmd . Flags ( ) . StringArrayVarP ( & execArg . envs , "env" , "" , [ ] string { } , "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)" )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . envfile , "env-file" , "" , ".env" , "environment file to read and use as env in the containers" )
execCmd . Flags ( ) . StringArrayVarP ( & execArg . secrets , "secret" , "s" , [ ] string { } , "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)" )
execCmd . PersistentFlags ( ) . BoolVarP ( & execArg . insecureSecrets , "insecure-secrets" , "" , false , "NOT RECOMMENDED! Doesn't hide secrets while printing logs." )
execCmd . Flags ( ) . BoolVar ( & execArg . privileged , "privileged" , false , "use privileged mode" )
execCmd . Flags ( ) . StringVar ( & execArg . usernsMode , "userns" , "" , "user namespace to use" )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . containerArchitecture , "container-architecture" , "" , "" , "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms." )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . containerDaemonSocket , "container-daemon-socket" , "" , "/var/run/docker.sock" , "Path to Docker daemon socket which will be mounted to containers" )
execCmd . Flags ( ) . BoolVar ( & execArg . useGitIgnore , "use-gitignore" , true , "Controls whether paths specified in .gitignore should be copied into container" )
execCmd . Flags ( ) . StringArrayVarP ( & execArg . containerCapAdd , "container-cap-add" , "" , [ ] string { } , "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)" )
execCmd . Flags ( ) . StringArrayVarP ( & execArg . containerCapDrop , "container-cap-drop" , "" , [ ] string { } , "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)" )
2023-04-11 02:58:12 +00:00
execCmd . Flags ( ) . StringVarP ( & execArg . containerOptions , "container-opts" , "" , "" , "container options" )
2023-03-08 02:55:31 +00:00
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . artifactServerPath , "artifact-server-path" , "" , "." , "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start." )
2023-06-05 08:51:44 +00:00
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . artifactServerAddr , "artifact-server-addr" , "" , "" , "Defines the address where the artifact server listens" )
2023-03-08 02:55:31 +00:00
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . artifactServerPort , "artifact-server-port" , "" , "34567" , "Defines the port where the artifact server listens (will only bind to localhost)." )
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . defaultActionsUrl , "default-actions-url" , "" , "https://gitea.com" , "Defines the default url of action instance." )
execCmd . PersistentFlags ( ) . BoolVarP ( & execArg . noSkipCheckout , "no-skip-checkout" , "" , false , "Do not skip actions/checkout" )
execCmd . PersistentFlags ( ) . BoolVarP ( & execArg . debug , "debug" , "d" , false , "enable debug log" )
execCmd . PersistentFlags ( ) . BoolVarP ( & execArg . dryrun , "dryrun" , "n" , false , "dryrun mode" )
2023-03-29 01:42:53 +00:00
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . image , "image" , "i" , "node:16-bullseye" , "docker image to use" )
2023-05-18 07:01:43 +00:00
execCmd . PersistentFlags ( ) . StringVarP ( & execArg . network , "network" , "" , "" , "Specify the network to which the container will connect" )
2023-03-08 02:55:31 +00:00
return execCmd
}