diff --git a/cmd/osmanage/main.go b/cmd/osmanage/main.go index a15b134..94239e0 100644 --- a/cmd/osmanage/main.go +++ b/cmd/osmanage/main.go @@ -4,6 +4,7 @@ import ( "fmt" "os" + grpcServer "github.com/OpenSlides/openslides-cli/internal/grpc/server" "github.com/OpenSlides/openslides-cli/internal/instance/config" "github.com/OpenSlides/openslides-cli/internal/instance/create" "github.com/OpenSlides/openslides-cli/internal/instance/remove" @@ -81,6 +82,8 @@ func RootCmd() *cobra.Command { k8sActions.UpdateInstanceCmd(), k8sActions.ScaleCmd(), k8sActions.GetServiceAddressCmd(), + k8sActions.GetNamespaceExistsCmd(), + k8sActions.GetInstanceStatusCmd(), ) rootCmd.AddCommand( @@ -96,6 +99,7 @@ func RootCmd() *cobra.Command { action.Cmd(), migrations.Cmd(), k8sCmd, + grpcServer.Cmd(), ) return rootCmd diff --git a/go.mod b/go.mod index aea3933..4333c62 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,13 @@ module github.com/OpenSlides/openslides-cli go 1.25.1 require ( - github.com/OpenSlides/openslides-go v0.0.0-20251104124242-d8e4b15bb11e + github.com/OpenSlides/openslides-go v0.0.0-20260317105801-e0e5c62bd3b1 github.com/schollz/progressbar/v3 v3.19.0 github.com/shopspring/decimal v1.4.0 github.com/spf13/cobra v1.10.2 golang.org/x/text v0.35.0 + google.golang.org/grpc v1.79.1 + google.golang.org/protobuf v1.36.10 k8s.io/api v0.35.2 k8s.io/apimachinery v0.35.2 k8s.io/client-go v0.35.2 @@ -26,7 +28,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.6 // indirect + github.com/jackc/pgx/v5 v5.8.0 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -39,14 +41,13 @@ require ( github.com/x448/float16 v0.8.4 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto 
v0.44.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.20.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/term v0.38.0 // indirect golang.org/x/time v0.9.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 08879b7..5683e3e 100644 --- a/go.sum +++ b/go.sum @@ -8,35 +8,45 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OpenSlides/openslides-go v0.0.0-20251104124242-d8e4b15bb11e h1:pRKc33SGfaqfFeDBXhinpqni3cNEpBBlTaD413NZh+g= -github.com/OpenSlides/openslides-go v0.0.0-20251104124242-d8e4b15bb11e/go.mod h1:Em6jcRrIaNDy6pkWLJx5gLLFO61th+GNQCmD/0AQPtY= +github.com/OpenSlides/openslides-go v0.0.0-20260317105801-e0e5c62bd3b1 h1:db7ppOZIFbe4BzizvnoEU4i3kfS3Wn09EROr8XJhRUk= +github.com/OpenSlides/openslides-go v0.0.0-20260317105801-e0e5c62bd3b1/go.mod h1:cux3Aaojj2xnTPUfNFQpnWLfn/5mE0mbefu0xIoAwMo= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A= -github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= -github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 
+github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg= +github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= @@ -47,12 +57,12 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag 
v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= -github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= -github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= +github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -70,8 +80,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= 
-github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= -github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -93,8 +103,12 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w= +github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM= +github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -113,12 +127,10 @@ 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runc v1.2.6 h1:P7Hqg40bsMvQGCS4S7DJYhUZOISMLJOB2iGX5COWiPk= -github.com/opencontainers/runc v1.2.6/go.mod h1:dOQeFo29xZKBNeRBI0B19mJtfHv68YgCTh1X+YphA+4= +github.com/opencontainers/runc v1.3.1 h1:c/yY0oh2wK7tzDuD56REnSxyU8ubh8hoAIOLGLrm4SM= +github.com/opencontainers/runc v1.3.1/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs= github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= @@ -130,8 +142,8 @@ github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1Ivohy github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod 
h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= @@ -156,32 +168,50 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod 
h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= -golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 
h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/constants/constants.go b/internal/constants/constants.go index 5472d61..fcd9821 100644 --- a/internal/constants/constants.go +++ 
b/internal/constants/constants.go @@ -195,6 +195,15 @@ const ( // MigrationStatusRunning indicates a migration is currently in progress MigrationStatusRunning string = "migration_running" + // MigrationStatusFailed indicates a migration process has failed + MigrationStatusFailed string = "migration_failed" + + // FinalizationStatusRunning indicates a migration finalization is currently in progress + FinalizationStatusRunning string = "finalization_running" + + // FinalizationStatusFailed indicates a migration finalization process has failed + FinalizationStatusFailed string = "finalization_failed" + // MigrationMaxRetries is the maximum number of retry attempts for failed migration requests MigrationMaxRetries int = 5 @@ -253,3 +262,10 @@ func GetKindPriority(kind string) int { } return 100 } + +// gRPC server defaults +const ( + GRPCHost string = "127.0.0.1" + GRPCPort string = "50051" + GRPCGracefulStopTimeout time.Duration = 30 * time.Second +) diff --git a/internal/grpc/server/cluster_status.go b/internal/grpc/server/cluster_status.go new file mode 100644 index 0000000..e18ad32 --- /dev/null +++ b/internal/grpc/server/cluster_status.go @@ -0,0 +1,30 @@ +package server + +import ( + "context" + "fmt" + + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) GetClusterStatus(ctx context.Context, req *pb.GetClusterStatusRequest) (*pb.GetClusterStatusResponse, error) { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return nil, fmt.Errorf("creating k8s client: %w", err) + } + + status, err := actions.CheckClusterStatus(ctx, k8sClient) + if err != nil { + return nil, fmt.Errorf("checking cluster status: %w", err) + } + + statusMsg := fmt.Sprintf("Cluster: %d/%d nodes ready", status.ReadyNodes, status.TotalNodes) + + return &pb.GetClusterStatusResponse{ + Status: statusMsg, + TotalNodes: 
int32(status.TotalNodes), + ReadyNodes: int32(status.ReadyNodes), + }, nil +} diff --git a/internal/grpc/server/config.go b/internal/grpc/server/config.go new file mode 100644 index 0000000..376ae91 --- /dev/null +++ b/internal/grpc/server/config.go @@ -0,0 +1,23 @@ +package server + +import ( + "context" + + instanceconfig "github.com/OpenSlides/openslides-cli/internal/instance/config" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) ConfigInstance(ctx context.Context, req *pb.InstanceConfigRequest) (*pb.InstanceConfigResponse, error) { + err := instanceconfig.Run( + req.InstanceDir, + req.Force, + req.Clean, + req.StackTemplatePath, + nil, + req.Configs, + ) + if err != nil { + return &pb.InstanceConfigResponse{Success: false, Error: err.Error()}, nil + } + return &pb.InstanceConfigResponse{Success: true}, nil +} diff --git a/internal/grpc/server/create.go b/internal/grpc/server/create.go new file mode 100644 index 0000000..c155d76 --- /dev/null +++ b/internal/grpc/server/create.go @@ -0,0 +1,15 @@ +package server + +import ( + "context" + + "github.com/OpenSlides/openslides-cli/internal/instance/create" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) CreateInstance(ctx context.Context, req *pb.CreateInstanceRequest) (*pb.CreateInstanceResponse, error) { + if err := create.CreateInstance(req.InstanceDir, req.DbPassword, req.SuperadminPassword); err != nil { + return &pb.CreateInstanceResponse{Success: false, Error: err.Error()}, nil + } + return &pb.CreateInstanceResponse{Success: true}, nil +} diff --git a/internal/grpc/server/get.go b/internal/grpc/server/get.go new file mode 100644 index 0000000..2afed5a --- /dev/null +++ b/internal/grpc/server/get.go @@ -0,0 +1,36 @@ +package server + +import ( + "context" + + "github.com/OpenSlides/openslides-cli/internal/manage/actions/get" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s 
*OsmanageServiceServer) GetCollection( + ctx context.Context, + req *pb.GetCollectionRequest, +) (*pb.GetCollectionResponse, error) { + if req.DbConfig == nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: "db_config is required", + }, nil + } + if req.QueryParams == nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: "query_params is required", + }, nil + } + + result, err := get.ExecuteGetCollection(ctx, req.DbConfig, req.QueryParams) + if err != nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: err.Error(), + }, nil + } + + return result, nil +} diff --git a/internal/grpc/server/get_namespace_exists.go b/internal/grpc/server/get_namespace_exists.go new file mode 100644 index 0000000..fa2ec3a --- /dev/null +++ b/internal/grpc/server/get_namespace_exists.go @@ -0,0 +1,25 @@ +package server + +import ( + "context" + "strings" + + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) GetNamespaceExists(ctx context.Context, req *pb.GetNamespaceExistsRequest) (*pb.GetNamespaceExistsResponse, error) { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return &pb.GetNamespaceExistsResponse{Error: err.Error()}, nil + } + namespace := strings.ReplaceAll(req.InstanceUrl, ".", "") + + exists, err := actions.GetNamespaceExists(ctx, k8sClient.Clientset(), namespace) + if err != nil { + return &pb.GetNamespaceExistsResponse{Error: err.Error()}, nil + } + + return &pb.GetNamespaceExistsResponse{Exists: exists}, nil +} diff --git a/internal/grpc/server/get_service_address.go b/internal/grpc/server/get_service_address.go new file mode 100644 index 0000000..39fe969 --- /dev/null +++ b/internal/grpc/server/get_service_address.go @@ -0,0 +1,25 @@ +package server + +import ( + "context" + "strings" + + 
"github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) GetServiceAddress(ctx context.Context, req *pb.GetServiceAddressRequest) (*pb.GetServiceAddressResponse, error) { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return &pb.GetServiceAddressResponse{Error: err.Error()}, nil + } + namespace := strings.ReplaceAll(req.InstanceUrl, ".", "") + + address, err := actions.GetServiceAddress(ctx, k8sClient.Clientset(), namespace, req.ServiceName) + if err != nil { + return &pb.GetServiceAddressResponse{Error: err.Error()}, nil + } + + return &pb.GetServiceAddressResponse{Address: address}, nil +} diff --git a/internal/grpc/server/health.go b/internal/grpc/server/health.go new file mode 100644 index 0000000..0eb082e --- /dev/null +++ b/internal/grpc/server/health.go @@ -0,0 +1,89 @@ +package server + +import ( + "strings" + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) GetInstanceHealth( + req *pb.GetInstanceHealthRequest, + stream pb.OsmanageService_GetInstanceHealthServer, +) error { + namespace := strings.ReplaceAll(req.InstanceUrl, ".", "") + + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return stream.Send(&pb.GetInstanceHealthResponse{ + Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + + if req.Wait { + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultInstanceTimeout + } + + streamCallback := func(status *actions.HealthStatus) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + return 
stream.Send(healthStatusToHealthResponse(status, false)) + } + + err := actions.WaitForInstanceHealthy(ctx, k8sClient, namespace, timeout, streamCallback) + + if err != nil { + return stream.Send(&pb.GetInstanceHealthResponse{ + Complete: true, + Error: err.Error(), + }) + } + + return stream.Send(&pb.GetInstanceHealthResponse{ + Complete: true, + }) + } + + status, err := actions.GetHealthStatus(ctx, k8sClient, namespace) + if err != nil { + return stream.Send(&pb.GetInstanceHealthResponse{ + Complete: true, + Error: err.Error(), + }) + } + + return stream.Send(healthStatusToHealthResponse(status, true)) +} + +// Helper to convert internal type to proto +func healthStatusToHealthResponse(status *actions.HealthStatus, complete bool) *pb.GetInstanceHealthResponse { + pods := make([]*pb.PodStatus, len(status.Pods)) + for i, pod := range status.Pods { + pods[i] = &pb.PodStatus{ + Name: pod.Name, + Phase: string(pod.Status.Phase), + Ready: actions.IsPodReady(&pod), + } + } + + return &pb.GetInstanceHealthResponse{ + Healthy: status.Healthy, + ReadyPods: int32(status.Ready), + TotalPods: int32(status.Total), + ActivePods: int32(status.ActivePods), + Pods: pods, + Complete: complete, + } +} diff --git a/internal/grpc/server/instance_status.go b/internal/grpc/server/instance_status.go new file mode 100644 index 0000000..f3020d7 --- /dev/null +++ b/internal/grpc/server/instance_status.go @@ -0,0 +1,55 @@ +package server + +import ( + "context" + "fmt" + + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + "github.com/OpenSlides/openslides-cli/internal/utils" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) GetInstanceStatus(ctx context.Context, req *pb.GetInstanceStatusRequest) (*pb.GetInstanceStatusResponse, error) { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return nil, fmt.Errorf("creating k8s client: %w", err) + } + + namespace 
:= utils.ExtractNamespace(req.InstanceUrl) + status, err := actions.GetInstanceStatus(ctx, k8sClient, namespace) + if err != nil { + return nil, fmt.Errorf("getting instance status: %w", err) + } + + if !status.NamespaceExists { + return &pb.GetInstanceStatusResponse{NamespaceExists: false}, nil + } + + var pods []*pb.InstancePodStatus + for _, pod := range status.Pods { + var containers []*pb.ContainerStatus + for _, c := range pod.Containers { + containers = append(containers, &pb.ContainerStatus{ + Name: c.Name, + Tag: c.Tag, + ContainerRegistry: c.ContainerRegistry, + Ready: c.Ready, + Started: c.Started, + EnvVars: c.EnvVars, + }) + } + pods = append(pods, &pb.InstancePodStatus{ + Name: pod.Name, + Service: pod.Service, + Node: pod.Node, + Containers: containers, + }) + } + + return &pb.GetInstanceStatusResponse{ + NamespaceExists: true, + Pods: pods, + ServiceCounts: status.ServiceCounts, + }, nil +} diff --git a/internal/grpc/server/migrations.go b/internal/grpc/server/migrations.go new file mode 100644 index 0000000..4cc2550 --- /dev/null +++ b/internal/grpc/server/migrations.go @@ -0,0 +1,125 @@ +package server + +import ( + "context" + "fmt" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/manage/actions/migrations" + "github.com/OpenSlides/openslides-cli/internal/manage/client" + "github.com/OpenSlides/openslides-cli/internal/utils" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) MigrationsMigrate( + req *pb.MigrationsRequest, + stream pb.OsmanageService_MigrationsMigrateServer, +) error { + return executeMigrationStream(req, stream, "migrate", + func(r *pb.MigrationsResponse) bool { return !migrations.Running(r) }, + ) +} + +func (s *OsmanageServiceServer) MigrationsFinalize( + req *pb.MigrationsRequest, + stream pb.OsmanageService_MigrationsFinalizeServer, +) error { + return executeMigrationStream(req, stream, "finalize", + func(r 
*pb.MigrationsResponse) bool { return !migrations.Running(r) && !migrations.Finalizing(r) }, + ) +} + +func (s *OsmanageServiceServer) MigrationsReset( + ctx context.Context, + req *pb.MigrationsRequest, +) (*pb.MigrationsResponse, error) { + return executeMigrationUnary(req, "reset") +} + +func (s *OsmanageServiceServer) MigrationsClearCollectionfieldTables( + ctx context.Context, + req *pb.MigrationsRequest, +) (*pb.MigrationsResponse, error) { + return executeMigrationUnary(req, "clear-collectionfield-tables") +} + +func (s *OsmanageServiceServer) MigrationsStats( + ctx context.Context, + req *pb.MigrationsRequest, +) (*pb.MigrationsResponse, error) { + return executeMigrationUnary(req, "stats") +} + +func (s *OsmanageServiceServer) MigrationsProgress( + ctx context.Context, + req *pb.MigrationsRequest, +) (*pb.MigrationsResponse, error) { + return executeMigrationUnary(req, "progress") +} + +// executeMigrationStream handles streaming migration commands (migrate, finalize) +func executeMigrationStream( + req *pb.MigrationsRequest, + stream interface { + Send(*pb.MigrationsProgressResponse) error + Context() context.Context + }, + command string, + stopCondition func(*pb.MigrationsResponse) bool, +) error { + authPassword, err := utils.ReadPassword(req.PasswordFilePath) + if err != nil { + return fmt.Errorf("reading password from %s: %w", req.PasswordFilePath, err) + } + + backendClient := client.New(req.AddressBackendmanage, authPassword) + + response, err := migrations.ExecuteMigrationCommand(backendClient, command) + if err != nil { + return fmt.Errorf("starting migration: %w", err) + } + + // If already completed within the backend's 0.2s thread wait, return immediately + if stopCondition(response) { + return stream.Send(&pb.MigrationsProgressResponse{ + Output: response.Output, + Running: false, + Success: response.Success, + Exception: response.Exception, + }) + } + + streamCallback := func(update *pb.MigrationsProgressResponse) error { + select { + case 
<-stream.Context().Done(): + return stream.Context().Err() + default: + } + + return stream.Send(update) + } + + return migrations.TrackMigrationProgress( + backendClient, + constants.DefaultMigrationProgressInterval, + stopCondition, + streamCallback, + ) +} + +// executeMigrationUnary handles single-response migration commands (reset, stats, progress, clear-collectionfield-tables) +func executeMigrationUnary(req *pb.MigrationsRequest, command string) (*pb.MigrationsResponse, error) { + authPassword, err := utils.ReadPassword(req.PasswordFilePath) + if err != nil { + return nil, fmt.Errorf("reading password from %s: %w", req.PasswordFilePath, err) + } + + backendClient := client.New(req.AddressBackendmanage, authPassword) + + response, err := migrations.ExecuteMigrationCommand(backendClient, command) + if err != nil { + return nil, fmt.Errorf("executing migration command %q: %w", command, err) + } + + return response, nil +} diff --git a/internal/grpc/server/remove.go b/internal/grpc/server/remove.go new file mode 100644 index 0000000..0fc25e8 --- /dev/null +++ b/internal/grpc/server/remove.go @@ -0,0 +1,15 @@ +package server + +import ( + "context" + + "github.com/OpenSlides/openslides-cli/internal/instance/remove" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) RemoveInstance(ctx context.Context, req *pb.RemoveInstanceRequest) (*pb.RemoveInstanceResponse, error) { + if err := remove.RemoveInstance(req.InstanceDir, req.Force); err != nil { + return &pb.RemoveInstanceResponse{Success: false, Error: err.Error()}, nil + } + return &pb.RemoveInstanceResponse{Success: true}, nil +} diff --git a/internal/grpc/server/scale.go b/internal/grpc/server/scale.go new file mode 100644 index 0000000..6a5aa8a --- /dev/null +++ b/internal/grpc/server/scale.go @@ -0,0 +1,48 @@ +package server + +import ( + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + 
"github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) ScaleService( + req *pb.ScaleServiceRequest, + stream pb.OsmanageService_ScaleServiceServer, +) error { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return stream.Send(&pb.ScaleServiceResponse{ + Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultDeploymentTimeout + } + + err = actions.ScaleService(ctx, k8sClient, req.Service, req.InstanceDir, req.SkipReadyCheck, timeout, + func(status *actions.DeploymentStatus) error { + return stream.Send(&pb.ScaleServiceResponse{ + Complete: false, + ReadyReplicas: int32(status.Ready), + DesiredReplicas: int32(status.Desired), + }) + }, + ) + if err != nil { + return stream.Send(&pb.ScaleServiceResponse{ + Complete: true, + Error: err.Error(), + }) + } + return stream.Send(&pb.ScaleServiceResponse{ + Complete: true, + }) +} diff --git a/internal/grpc/server/send_manage_action.go b/internal/grpc/server/send_manage_action.go new file mode 100644 index 0000000..fa4be5c --- /dev/null +++ b/internal/grpc/server/send_manage_action.go @@ -0,0 +1,36 @@ +package server + +import ( + "context" + + manageclient "github.com/OpenSlides/openslides-cli/internal/manage/client" + "github.com/OpenSlides/openslides-cli/internal/utils" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) SendManageAction(ctx context.Context, req *pb.SendManageActionRequest) (*pb.SendManageActionResponse, error) { + password, err := utils.ReadPassword(req.PasswordFilePath) + if err != nil { + return &pb.SendManageActionResponse{Success: false, Error: err.Error()}, nil + } + + cl := manageclient.New(req.AddressBackendmanage, password) + resp, err := 
cl.SendAction(req.Action, req.Payload) + if err != nil { + return &pb.SendManageActionResponse{Success: false, Error: err.Error()}, nil + } + + body, err := manageclient.CheckResponse(resp) + if err != nil { + return &pb.SendManageActionResponse{ + Success: false, + Error: err.Error(), + Body: body, + }, nil + } + + return &pb.SendManageActionResponse{ + Success: true, + Body: body, + }, nil +} diff --git a/internal/grpc/server/server.go b/internal/grpc/server/server.go new file mode 100644 index 0000000..704c12a --- /dev/null +++ b/internal/grpc/server/server.go @@ -0,0 +1,75 @@ +package server + +import ( + "fmt" + "net" + "os" + "os/signal" + "syscall" + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/logger" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +// OsmanageServiceServer implements the OsmanageService gRPC interface +type OsmanageServiceServer struct { + pb.UnimplementedOsmanageServiceServer +} + +func NewOsmanageServiceServer() *OsmanageServiceServer { + return &OsmanageServiceServer{} +} + +func Cmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "serve", + Short: "Start gRPC server", + Long: "Start the osmanage gRPC server for client-server communication", + } + + host := cmd.Flags().String("host", constants.GRPCHost, "Host to listen on") + port := cmd.Flags().String("port", constants.GRPCPort, "Port to listen on") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + address := fmt.Sprintf("%s:%s", *host, *port) + return start(address) + } + + return cmd +} + +func start(address string) error { + lis, err := net.Listen("tcp", address) + if err != nil { + return fmt.Errorf("failed to listen on %s: %w", address, err) + } + + grpcSrv := grpc.NewServer() + pb.RegisterOsmanageServiceServer(grpcSrv, NewOsmanageServiceServer()) + + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, 
syscall.SIGTERM) + go func() { + <-quit + logger.Info("shutting down gRPC server...") + + // Force shutdown after timeout if graceful takes too long + go func() { + time.Sleep(constants.GRPCGracefulStopTimeout) + logger.Info("graceful shutdown timed out, forcing stop") + grpcSrv.Stop() + }() + + grpcSrv.GracefulStop() + }() + + logger.Info("gRPC server listening on %s", address) + if err := grpcSrv.Serve(lis); err != nil { + return fmt.Errorf("gRPC server error: %w", err) + } + return nil +} diff --git a/internal/grpc/server/setup.go b/internal/grpc/server/setup.go new file mode 100644 index 0000000..a67fbc9 --- /dev/null +++ b/internal/grpc/server/setup.go @@ -0,0 +1,23 @@ +package server + +import ( + "context" + + "github.com/OpenSlides/openslides-cli/internal/instance/setup" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) SetupInstance(ctx context.Context, req *pb.InstanceConfigRequest) (*pb.InstanceConfigResponse, error) { + err := setup.Run( + req.InstanceDir, + req.Force, + req.Clean, + req.StackTemplatePath, + nil, + req.Configs, + ) + if err != nil { + return &pb.InstanceConfigResponse{Success: false, Error: err.Error()}, nil + } + return &pb.InstanceConfigResponse{Success: true}, nil +} diff --git a/internal/grpc/server/start.go b/internal/grpc/server/start.go new file mode 100644 index 0000000..23cb11b --- /dev/null +++ b/internal/grpc/server/start.go @@ -0,0 +1,71 @@ +package server + +import ( + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) StartInstance( + req *pb.StartInstanceRequest, + stream pb.OsmanageService_StartInstanceServer, +) error { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return stream.Send(&pb.StartInstanceResponse{ + 
Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultInstanceTimeout + } + + streamCallback := func(status *actions.HealthStatus) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + return stream.Send(healthStatusToStartResponse(status, false)) + } + + err = actions.StartInstance(ctx, k8sClient, req.InstanceDir, req.SkipReadyCheck, timeout, req.Labels, streamCallback) + if err != nil { + return stream.Send(&pb.StartInstanceResponse{ + Complete: true, + Error: err.Error(), + }) + } + return stream.Send(&pb.StartInstanceResponse{ + Complete: true, + }) +} + +// Helper to convert internal type to proto +func healthStatusToStartResponse(status *actions.HealthStatus, complete bool) *pb.StartInstanceResponse { + pods := make([]*pb.PodStatus, len(status.Pods)) + for i, pod := range status.Pods { + pods[i] = &pb.PodStatus{ + Name: pod.Name, + Phase: string(pod.Status.Phase), + Ready: actions.IsPodReady(&pod), + } + } + + return &pb.StartInstanceResponse{ + Healthy: status.Healthy, + ReadyPods: int32(status.Ready), + TotalPods: int32(status.Total), + ActivePods: int32(status.ActivePods), + Pods: pods, + Complete: complete, + } +} diff --git a/internal/grpc/server/stop.go b/internal/grpc/server/stop.go new file mode 100644 index 0000000..95c446f --- /dev/null +++ b/internal/grpc/server/stop.go @@ -0,0 +1,47 @@ +package server + +import ( + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) StopInstance( + req *pb.StopInstanceRequest, + stream pb.OsmanageService_StopInstanceServer, +) error { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return 
stream.Send(&pb.StopInstanceResponse{ + Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultNamespaceTimeout + } + + err = actions.StopInstance(ctx, k8sClient, req.InstanceDir, timeout, + func(elapsedSeconds int) error { + return stream.Send(&pb.StopInstanceResponse{ + Complete: false, + ElapsedSeconds: int32(elapsedSeconds), + }) + }, + ) + if err != nil { + return stream.Send(&pb.StopInstanceResponse{ + Complete: true, + Error: err.Error(), + }) + } + return stream.Send(&pb.StopInstanceResponse{ + Complete: true, + }) +} diff --git a/internal/grpc/server/update_backendmanage.go b/internal/grpc/server/update_backendmanage.go new file mode 100644 index 0000000..d118cf7 --- /dev/null +++ b/internal/grpc/server/update_backendmanage.go @@ -0,0 +1,48 @@ +package server + +import ( + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) UpdateBackendmanage( + req *pb.UpdateBackendmanageRequest, + stream pb.OsmanageService_UpdateBackendmanageServer, +) error { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return stream.Send(&pb.UpdateBackendmanageResponse{ + Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultDeploymentTimeout + } + + err = actions.UpdateBackendmanage(ctx, k8sClient, req.InstanceUrl, req.Tag, req.ContainerRegistry, timeout, + func(status *actions.DeploymentStatus) error { + return stream.Send(&pb.UpdateBackendmanageResponse{ + Complete: false, + ReadyReplicas: int32(status.Ready), + DesiredReplicas: int32(status.Desired), + }) + }, + ) + if 
err != nil { + return stream.Send(&pb.UpdateBackendmanageResponse{ + Complete: true, + Error: err.Error(), + }) + } + return stream.Send(&pb.UpdateBackendmanageResponse{ + Complete: true, + }) +} diff --git a/internal/grpc/server/update_instance.go b/internal/grpc/server/update_instance.go new file mode 100644 index 0000000..dd3bd0b --- /dev/null +++ b/internal/grpc/server/update_instance.go @@ -0,0 +1,78 @@ +package server + +import ( + "time" + + "github.com/OpenSlides/openslides-cli/internal/constants" + "github.com/OpenSlides/openslides-cli/internal/k8s/actions" + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" +) + +func (s *OsmanageServiceServer) UpdateInstance( + req *pb.UpdateInstanceRequest, + stream pb.OsmanageService_UpdateInstanceServer, +) error { + k8sClient, err := client.New(req.Kubeconfig) + if err != nil { + return stream.Send(&pb.UpdateInstanceResponse{ + Complete: true, + Error: err.Error(), + }) + } + + ctx := stream.Context() + timeout := time.Duration(req.TimeoutSeconds) * time.Second + if timeout == 0 { + timeout = constants.DefaultInstanceTimeout + } + + streamCallback := func(status *actions.HealthStatus) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + return stream.Send(healthStatusToUpdateResponse(status, false)) + } + + inactiveCallback := func() error { + return stream.Send(&pb.UpdateInstanceResponse{ + Complete: true, + Inactive: true, + }) + } + + err = actions.UpdateInstance(ctx, k8sClient, req.InstanceDir, req.SkipReadyCheck, timeout, streamCallback, inactiveCallback) + if err != nil { + return stream.Send(&pb.UpdateInstanceResponse{ + Complete: true, + Error: err.Error(), + }) + } + return stream.Send(&pb.UpdateInstanceResponse{ + Complete: true, + }) +} + +// Helper to convert internal type to proto +func healthStatusToUpdateResponse(status *actions.HealthStatus, complete bool) *pb.UpdateInstanceResponse { + pods := 
make([]*pb.PodStatus, len(status.Pods)) + for i, pod := range status.Pods { + pods[i] = &pb.PodStatus{ + Name: pod.Name, + Phase: string(pod.Status.Phase), + Ready: actions.IsPodReady(&pod), + } + } + + return &pb.UpdateInstanceResponse{ + Healthy: status.Healthy, + ReadyPods: int32(status.Ready), + TotalPods: int32(status.Total), + ActivePods: int32(status.ActivePods), + Pods: pods, + Complete: complete, + } +} diff --git a/internal/instance/config/config.go b/internal/instance/config/config.go index 688ec53..117dd41 100644 --- a/internal/instance/config/config.go +++ b/internal/instance/config/config.go @@ -51,6 +51,7 @@ func Cmd() *cobra.Command { } force := cmd.Flags().BoolP("force", "f", false, "overwrite existing files") + clean := cmd.Flags().Bool("clean", false, "Wipe stack folder contents before generating new files") customTemplate := cmd.Flags().StringP("template", "t", "", "custom template file or directory") configFiles := cmd.Flags().StringArrayP("config", "c", nil, "custom YAML config file (can be used multiple times)") cmd.MarkFlagsRequiredTogether("template", "config") @@ -59,16 +60,11 @@ func Cmd() *cobra.Command { logger.Info("=== CONFIG ===") baseDir := args[0] - logger.Debug("Base directory: %s", args[0]) + logger.Debug("Base directory: %s", baseDir) logger.Debug("Config files: %v", *configFiles) - config, err := NewConfig(*configFiles) - if err != nil { - return fmt.Errorf("parsing configuration: %w", err) - } - - if err := CreateDirAndFiles(baseDir, *force, *customTemplate, config); err != nil { - return fmt.Errorf("creating deployment files: %w", err) + if err := Run(baseDir, *force, *clean, *customTemplate, *configFiles, nil); err != nil { + return err } logger.Info("Config files created successfully") @@ -79,34 +75,64 @@ func Cmd() *cobra.Command { return cmd } -// NewConfig creates a configuration map by deep-merging all given files in order. -// Later files override existing keys and add new keys. 
-func NewConfig(configFileNames []string) (map[string]any, error) { - logger.Debug("Loading configuration from %d file(s)", len(configFileNames)) +// Run merges configFiles and optional instanceConfig (merged last, wins on conflict) +// into a config map, then generates deployment files from the template into baseDir. +func Run(baseDir string, force, clean bool, customTemplate string, configFiles []string, configs [][]byte) error { + if clean { + if err := os.RemoveAll(filepath.Join(baseDir, "stack")); err != nil { + return fmt.Errorf("cleaning stack folder: %w", err) + } + } + cfg, err := NewConfig(configFiles, configs) + if err != nil { + return fmt.Errorf("parsing configuration: %w", err) + } + if err := CreateDirAndFiles(baseDir, force, customTemplate, cfg); err != nil { + return fmt.Errorf("creating deployment files: %w", err) + } + return nil +} +// NewConfig creates a configuration map by deep-merging configs in order. +// Later entries override existing keys and add new keys. +// Exactly one of configFiles or configs should be provided: +// - configFiles: path-based configs for direct CLI use, files are read from disk +// - configs: pre-read byte slices for gRPC use, files are read on the client side +func NewConfig(configFiles []string, configs [][]byte) (map[string]any, error) { config := make(map[string]any) - for _, filename := range configFileNames { - logger.Debug("Reading config file: %s", filename) + for _, filename := range configFiles { data, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("reading config file %q: %w", filename, err) } - - var fileConfig map[string]any - if err := yaml.Unmarshal(data, &fileConfig); err != nil { - return nil, fmt.Errorf("unmarshaling YAML from %q: %w", filename, err) + if err := mergeYAML(&config, data, filename); err != nil { + return nil, err } + } - // Deep merge fileConfig into config - if err := mergo.Merge(&config, fileConfig, mergo.WithOverride); err != nil { - return nil, 
fmt.Errorf("merging config from %q: %w", filename, err) + for i, data := range configs { + if err := mergeYAML(&config, data, fmt.Sprintf("config[%d]", i)); err != nil { + return nil, err } } return config, nil } +// mergeYAML unmarshals YAML data into a map and deep-merges it into config, +// with later values overriding existing keys. label is used for error messages. +func mergeYAML(config *map[string]any, data []byte, label string) error { + var parsed map[string]any + if err := yaml.Unmarshal(data, &parsed); err != nil { + return fmt.Errorf("unmarshaling YAML from %q: %w", label, err) + } + if err := mergo.Merge(config, parsed, mergo.WithOverride); err != nil { + return fmt.Errorf("merging config from %q: %w", label, err) + } + return nil +} + // CreateDirAndFiles creates the base directory and (re-)creates the deployment // files according to the given template. Use a truthy value for force to // override existing files. diff --git a/internal/instance/config/config_test.go b/internal/instance/config/config_test.go index 12f63b0..68401ae 100644 --- a/internal/instance/config/config_test.go +++ b/internal/instance/config/config_test.go @@ -11,7 +11,7 @@ import ( func TestNewConfig(t *testing.T) { t.Run("empty config list", func(t *testing.T) { - cfg, err := NewConfig([]string{}) + cfg, err := NewConfig(nil, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ -25,7 +25,7 @@ func TestNewConfig(t *testing.T) { } }) - t.Run("with custom config", func(t *testing.T) { + t.Run("with custom config file", func(t *testing.T) { tmpfile, err := os.CreateTemp("", "config-*.yml") if err != nil { t.Fatal(err) @@ -52,7 +52,7 @@ defaults: t.Fatalf("failed to close temp file: %v", err) } - cfg, err := NewConfig([]string{tmpfile.Name()}) + cfg, err := NewConfig([]string{tmpfile.Name()}, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ -62,7 +62,7 @@ defaults: if cfg["host"] != "192.168.1.1" { t.Errorf("Expected host 192.168.1.1, got %v", 
cfg["host"]) } - if cfg["port"] != float64(9000) { // YAML unmarshals numbers as float64 + if cfg["port"] != float64(9000) { t.Errorf("Expected port 9000, got %v", cfg["port"]) } @@ -79,7 +79,7 @@ defaults: } }) - t.Run("merge multiple configs", func(t *testing.T) { + t.Run("merge multiple config files", func(t *testing.T) { tmpdir := t.TempDir() config1 := filepath.Join(tmpdir, "config1.yml") @@ -92,7 +92,7 @@ defaults: t.Fatalf("failed to write config2: %v", err) } - cfg, err := NewConfig([]string{config1, config2}) + cfg, err := NewConfig([]string{config1, config2}, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ -110,6 +110,96 @@ defaults: } }) + t.Run("gRPC bytes configs override earlier entries", func(t *testing.T) { + // Simulates stack_config at index 0, instance config at index 1 — sent over gRPC + stackConfig := []byte(` +defaults: + tag: 4.2.0 + containerRegistry: registry.example.com +stackName: base-stack +`) + instanceConfig := []byte(`{ + "stackName": "my.instance.org", + "defaults": { + "tag": "4.3.0" + } + }`) + + cfg, err := NewConfig(nil, [][]byte{stackConfig, instanceConfig}) + if err != nil { + t.Errorf("NewConfig() error = %v", err) + } + + // instance config should override stack config + if cfg["stackName"] != "my.instance.org" { + t.Errorf("Expected stackName my.instance.org from instance config, got %v", cfg["stackName"]) + } + defaults, ok := cfg["defaults"].(map[string]any) + if !ok { + t.Fatal("Expected defaults to be a map") + } + if defaults["tag"] != "4.3.0" { + t.Errorf("Expected tag 4.3.0 from instance config, got %v", defaults["tag"]) + } + // containerRegistry should be preserved from stack config + if defaults["containerRegistry"] != "registry.example.com" { + t.Errorf("Expected containerRegistry preserved from stack config, got %v", defaults["containerRegistry"]) + } + }) + + t.Run("nil configs is a no-op", func(t *testing.T) { + tmpdir := t.TempDir() + + stackConfig := filepath.Join(tmpdir, "stack.yml") + if 
err := os.WriteFile(stackConfig, []byte("stackName: base-stack\n"), constants.StackFilePerm); err != nil { + t.Fatalf("failed to write stack config: %v", err) + } + + cfg, err := NewConfig([]string{stackConfig}, nil) + if err != nil { + t.Errorf("NewConfig() error = %v", err) + } + if cfg["stackName"] != "base-stack" { + t.Errorf("Expected stackName base-stack, got %v", cfg["stackName"]) + } + }) + + t.Run("invalid gRPC config bytes", func(t *testing.T) { + _, err := NewConfig(nil, [][]byte{[]byte("invalid: yaml: content:")}) + if err == nil { + t.Error("Expected error for invalid config bytes") + } + }) + + t.Run("invalid YAML file", func(t *testing.T) { + tmpfile, err := os.CreateTemp("", "config-*.yml") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.Remove(tmpfile.Name()); err != nil { + t.Logf("warning: failed to remove temp file: %v", err) + } + }) + if _, err := tmpfile.WriteString("invalid: yaml: content:"); err != nil { + t.Fatalf("failed to write invalid yaml: %v", err) + } + if err := tmpfile.Close(); err != nil { + t.Fatalf("failed to close temp file: %v", err) + } + _, err = NewConfig([]string{tmpfile.Name()}, nil) + if err == nil { + t.Error("Expected error for invalid YAML") + } + }) + + t.Run("nonexistent file", func(t *testing.T) { + _, err := NewConfig([]string{"nonexistent.yml"}, nil) + if err == nil { + t.Error("Expected error for nonexistent file") + } + }) + t.Run("deep merge nested structures", func(t *testing.T) { tmpdir := t.TempDir() @@ -133,7 +223,7 @@ services: t.Fatalf("failed to write config2: %v", err) } - cfg, err := NewConfig([]string{config1, config2}) + cfg, err := NewConfig([]string{config1, config2}, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ -186,7 +276,7 @@ services: t.Fatalf("failed to write config2: %v", err) } - cfg, err := NewConfig([]string{config1, config2}) + cfg, err := NewConfig([]string{config1, config2}, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ 
-235,7 +325,7 @@ services: t.Fatalf("failed to write config2: %v", err) } - cfg, err := NewConfig([]string{config1, config2}) + cfg, err := NewConfig([]string{config1, config2}, nil) if err != nil { t.Errorf("NewConfig() error = %v", err) } @@ -266,38 +356,7 @@ services: // auth service doesn't exist at all - that's fine with 'or' pattern if _, exists := services["auth"]; exists { - t.Errorf("auth service should not exist, but it do") - } - }) - - t.Run("invalid YAML file", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "config-*.yml") - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - if err := os.Remove(tmpfile.Name()); err != nil { - t.Logf("warning: failed to remove temp file: %v", err) - } - }) - - if _, err := tmpfile.WriteString("invalid: yaml: content:"); err != nil { - t.Fatalf("failed to write invalid yaml: %v", err) - } - if err := tmpfile.Close(); err != nil { - t.Fatalf("failed to close temp file: %v", err) - } - - _, err = NewConfig([]string{tmpfile.Name()}) - if err == nil { - t.Error("Expected error for invalid YAML") - } - }) - - t.Run("nonexistent file", func(t *testing.T) { - _, err := NewConfig([]string{"nonexistent.yml"}) - if err == nil { - t.Error("Expected error for nonexistent file") + t.Errorf("auth service should not exist, but it does") } }) } @@ -454,10 +513,7 @@ func TestEnvMapToK8S(t *testing.T) { }) t.Run("handles empty map", func(t *testing.T) { - env := map[string]any{} - - result := envMapToK8S(env) - + result := envMapToK8S(map[string]any{}) if len(result) != 0 { t.Errorf("Expected 0 items, got %d", len(result)) } diff --git a/internal/instance/create/create.go b/internal/instance/create/create.go index f1ea84f..8383473 100644 --- a/internal/instance/create/create.go +++ b/internal/instance/create/create.go @@ -44,18 +44,11 @@ func Cmd() *cobra.Command { _ = cmd.MarkFlagRequired("superadmin-password") cmd.RunE = func(cmd *cobra.Command, args []string) error { - if strings.TrimSpace(*dbPassword) == "" { - return 
fmt.Errorf("--db-password cannot be empty") - } - if strings.TrimSpace(*superadminPassword) == "" { - return fmt.Errorf("--superadmin-password cannot be empty") - } - logger.Info("=== K8S CREATE INSTANCE ===") instanceDir := args[0] logger.Debug("Instance directory: %s", instanceDir) - if err := createInstance(instanceDir, *dbPassword, *superadminPassword); err != nil { + if err := CreateInstance(instanceDir, *dbPassword, *superadminPassword); err != nil { return fmt.Errorf("creating instance: %w", err) } @@ -66,8 +59,15 @@ func Cmd() *cobra.Command { return cmd } -// createInstance sets up the secrets directory with the provided passwords -func createInstance(instanceDir, dbPassword, superadminPassword string) error { +// CreateInstance sets up the secrets directory with the provided passwords +func CreateInstance(instanceDir, dbPassword, superadminPassword string) error { + if strings.TrimSpace(dbPassword) == "" { + return fmt.Errorf("db_password cannot be empty") + } + if strings.TrimSpace(superadminPassword) == "" { + return fmt.Errorf("superadmin_password cannot be empty") + } + secretsDir := filepath.Join(instanceDir, constants.SecretsDirName) if _, err := os.Stat(secretsDir); os.IsNotExist(err) { diff --git a/internal/instance/create/create_test.go b/internal/instance/create/create_test.go index 9b4c171..0ab77b2 100644 --- a/internal/instance/create/create_test.go +++ b/internal/instance/create/create_test.go @@ -157,7 +157,7 @@ func TestCreateInstance(t *testing.T) { dbPassword := "new-database-password" superadminPassword := "new-superadmin-password" - err = createInstance(tmpDir, dbPassword, superadminPassword) + err = CreateInstance(tmpDir, dbPassword, superadminPassword) if err != nil { t.Fatalf("createInstance failed: %v", err) } @@ -234,7 +234,7 @@ func TestCreateInstance_SecretsDirectoryNotExist(t *testing.T) { }) // Don't create secrets directory - should fail - err = createInstance(tmpDir, "password", "admin") + err = CreateInstance(tmpDir, 
"password", "admin") if err == nil { t.Error("Expected error when secrets directory doesn't exist, got nil") } diff --git a/internal/instance/remove/remove.go b/internal/instance/remove/remove.go index dada244..c23c083 100644 --- a/internal/instance/remove/remove.go +++ b/internal/instance/remove/remove.go @@ -34,7 +34,7 @@ func Cmd() *cobra.Command { instanceDir := args[0] logger.Debug("Instance directory: %s", instanceDir) - if err := removeInstance(instanceDir, *force); err != nil { + if err := RemoveInstance(instanceDir, *force); err != nil { return fmt.Errorf("removing instance: %w", err) } @@ -45,8 +45,9 @@ func Cmd() *cobra.Command { return cmd } -// removeInstance removes the entire instance directory -func removeInstance(instanceDir string, force bool) error { +// RemoveInstance removes the entire instance directory +// When force is false, prompts for confirmation before deletion. +func RemoveInstance(instanceDir string, force bool) error { info, err := os.Stat(instanceDir) if err != nil { if os.IsNotExist(err) { diff --git a/internal/instance/remove/remove_test.go b/internal/instance/remove/remove_test.go index b2bcd43..1a8de62 100644 --- a/internal/instance/remove/remove_test.go +++ b/internal/instance/remove/remove_test.go @@ -36,7 +36,7 @@ func TestRemoveInstance_DirectoryExists(t *testing.T) { } }) - err = removeInstance(instanceDir, true) + err = RemoveInstance(instanceDir, true) if err != nil { t.Fatalf("removeInstance failed: %v", err) } @@ -59,7 +59,7 @@ func TestRemoveInstance_DirectoryNotExist(t *testing.T) { nonExistentDir := filepath.Join(tmpDir, "does-not-exist") - err = removeInstance(nonExistentDir, true) + err = RemoveInstance(nonExistentDir, true) if err == nil { t.Error("Expected error when removing non-existent directory, got nil") } @@ -86,7 +86,7 @@ func TestRemoveInstance_NotADirectory(t *testing.T) { t.Fatalf("Failed to create test file: %v", err) } - err = removeInstance(testFile, true) + err = RemoveInstance(testFile, true) if 
err == nil { t.Error("Expected error when removing a file instead of directory, got nil") } @@ -140,7 +140,7 @@ func TestRemoveInstance_RemovesNestedStructure(t *testing.T) { } }) - err = removeInstance(instanceDir, true) + err = RemoveInstance(instanceDir, true) if err != nil { t.Fatalf("removeInstance failed: %v", err) } @@ -171,7 +171,7 @@ func TestRemoveInstance_WithForceFlag(t *testing.T) { } }) - err = removeInstance(instanceDir, true) + err = RemoveInstance(instanceDir, true) if err != nil { t.Fatalf("removeInstance with force=true failed: %v", err) } @@ -198,7 +198,7 @@ func TestRemoveInstance_EmptyDirectory(t *testing.T) { } }) - err = removeInstance(instanceDir, true) + err = RemoveInstance(instanceDir, true) if err != nil { t.Fatalf("removeInstance failed on empty directory: %v", err) } @@ -235,7 +235,7 @@ func TestRemoveInstance_WithSymlinks(t *testing.T) { } }) - err = removeInstance(instanceDir, true) + err = RemoveInstance(instanceDir, true) if err != nil { t.Fatalf("removeInstance failed: %v", err) } diff --git a/internal/instance/setup/setup.go b/internal/instance/setup/setup.go index 20b1b22..e32998a 100644 --- a/internal/instance/setup/setup.go +++ b/internal/instance/setup/setup.go @@ -63,6 +63,7 @@ func Cmd() *cobra.Command { } force := cmd.Flags().BoolP("force", "f", false, "overwrite existing files") + clean := cmd.Flags().Bool("clean", false, "Wipe stack folder contents before generating new files") customTemplate := cmd.Flags().StringP("template", "t", "", "custom template file or directory") configFiles := cmd.Flags().StringArrayP("config", "c", nil, "custom YAML config file (can be used multiple times)") cmd.MarkFlagsRequiredTogether("template", "config") @@ -74,45 +75,59 @@ func Cmd() *cobra.Command { logger.Debug("Base directory: %s", baseDir) logger.Debug("Force: %v, Custom: %s", *force, *customTemplate) - // Parse configuration - cfg, err := config.NewConfig(*configFiles) - if err != nil { - return fmt.Errorf("parsing configuration: 
%w", err) + if err := Run(baseDir, *force, *clean, *customTemplate, *configFiles, nil); err != nil { + return err } - // Create secrets directory - secretsDir := filepath.Join(baseDir, constants.SecretsDirName) - logger.Debug("Creating secrets directory: %s", secretsDir) - if err := os.MkdirAll(secretsDir, constants.SecretsDirPerm); err != nil { - return fmt.Errorf("creating secrets directory: %w", err) - } + logger.Info("Setup completed successfully") + fmt.Printf("Setup completed in: %s\n", baseDir) + return nil + } - // Create secrets - logger.Info("Creating secrets...") - if err := createSecrets(secretsDir, *force, defaultSecrets); err != nil { - return fmt.Errorf("creating secrets: %w", err) - } + return cmd +} - // Create certificates if HTTPS is enabled - if enableLocalHTTPS, ok := cfg["enableLocalHTTPS"].(bool); ok && enableLocalHTTPS { - logger.Info("Creating SSL certificates...") - if err := createCerts(secretsDir, *force); err != nil { - return fmt.Errorf("creating certificates: %w", err) - } +// Run creates secrets, optional SSL certificates, and deployment files for a new +// instance. Exactly one of configFiles (CLI) or configs (gRPC) should be provided. +// configs are pre-read byte slices sent over gRPC, configFiles are read from disk. +// In both cases the last entry wins on conflict before generating deployment files +// from the template into baseDir. 
+func Run(baseDir string, force, clean bool, customTemplate string, configFiles []string, configs [][]byte) error { + if clean { + if err := os.RemoveAll(filepath.Join(baseDir, "stack")); err != nil { + return fmt.Errorf("cleaning stack folder: %w", err) } + } - // Create deployment files - logger.Info("Creating deployment files...") - if err := config.CreateDirAndFiles(baseDir, *force, *customTemplate, cfg); err != nil { - return fmt.Errorf("creating deployment files: %w", err) + cfg, err := config.NewConfig(configFiles, configs) + if err != nil { + return fmt.Errorf("parsing configuration: %w", err) + } + + secretsDir := filepath.Join(baseDir, constants.SecretsDirName) + logger.Debug("Creating secrets directory: %s", secretsDir) + if err := os.MkdirAll(secretsDir, constants.SecretsDirPerm); err != nil { + return fmt.Errorf("creating secrets directory: %w", err) + } + + logger.Info("Creating secrets...") + if err := createSecrets(secretsDir, force, defaultSecrets); err != nil { + return fmt.Errorf("creating secrets: %w", err) + } + + if enableLocalHTTPS, ok := cfg["enableLocalHTTPS"].(bool); ok && enableLocalHTTPS { + logger.Info("Creating SSL certificates...") + if err := createCerts(secretsDir, force); err != nil { + return fmt.Errorf("creating certificates: %w", err) } + } - logger.Info("Setup completed successfully") - fmt.Printf("Setup completed in: %s\n", baseDir) - return nil + logger.Info("Creating deployment files...") + if err := config.CreateDirAndFiles(baseDir, force, customTemplate, cfg); err != nil { + return fmt.Errorf("creating deployment files: %w", err) } - return cmd + return nil } func createSecrets(dir string, force bool, secrets []SecretSpec) error { diff --git a/internal/k8s/actions/apply.go b/internal/k8s/actions/apply.go index 30c3d40..012446d 100644 --- a/internal/k8s/actions/apply.go +++ b/internal/k8s/actions/apply.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/yaml" ) @@ -26,23 +27,35 @@ const ( forceConflicts bool = true ) -// applyManifest applies a single YAML manifest file using RESTMapper -func applyManifest(ctx context.Context, k8sClient *client.Client, manifestPath string) (string, error) { +// resourceKey uniquely identifies a Kubernetes resource by GVR and name +type resourceKey struct { + gvr schema.GroupVersionResource + name string +} + +// applyManifest applies a single YAML manifest file using RESTMapper and returns +// the applied resourceKey and namespace. Returns nil key if the manifest is skipped. +func applyManifest(ctx context.Context, k8sClient *client.Client, manifestPath string, labels map[string]string) (*resourceKey, string, error) { logger.Debug("Applying manifest: %s", manifestPath) data, err := os.ReadFile(manifestPath) if err != nil { - return "", fmt.Errorf("reading manifest: %w", err) + return nil, "", fmt.Errorf("reading manifest: %w", err) } var obj unstructured.Unstructured if err := yaml.Unmarshal(data, &obj); err != nil { - return "", fmt.Errorf("parsing YAML: %w", err) + return nil, "", fmt.Errorf("parsing YAML: %w", err) } if obj.GetKind() == "" { logger.Info("Skipping manifest with no kind: %s", manifestPath) - return "", nil + return nil, "", nil + } + + if !matchesLabels(&obj, labels) { + logger.Debug("Skipping %s/%s: does not match labels", obj.GetKind(), obj.GetName()) + return nil, "", nil } namespace := obj.GetNamespace() @@ -52,25 +65,25 @@ func applyManifest(ctx context.Context, k8sClient *client.Client, manifestPath s mapper, err := k8sClient.RESTMapper() if err != nil { - return "", fmt.Errorf("getting REST mapper: %w", err) + return nil, "", fmt.Errorf("getting REST mapper: %w", err) } gvk := obj.GroupVersionKind() mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { - return "", fmt.Errorf("getting REST mapping for %s: 
%w", gvk.String(), err) + return nil, "", fmt.Errorf("getting REST mapping for %s: %w", gvk.String(), err) } dynamicClient, err := k8sClient.Dynamic() if err != nil { - return "", fmt.Errorf("getting dynamic client: %w", err) + return nil, "", fmt.Errorf("getting dynamic client: %w", err) } var result *unstructured.Unstructured if mapping.Scope.Name() == meta.RESTScopeNameNamespace { if namespace == "" { - return "", fmt.Errorf("resource %s/%s is namespaced but has no namespace specified", + return nil, "", fmt.Errorf("resource %s/%s is namespaced but has no namespace specified", obj.GetKind(), obj.GetName()) } result, err = dynamicClient.Resource(mapping.Resource).Namespace(namespace).Apply( @@ -96,18 +109,32 @@ func applyManifest(ctx context.Context, k8sClient *client.Client, manifestPath s } if err != nil { - return namespace, fmt.Errorf("applying %s/%s: %w", obj.GetKind(), obj.GetName(), err) + return nil, namespace, fmt.Errorf("applying %s/%s: %w", obj.GetKind(), obj.GetName(), err) } logger.Info("Applied %s: %s", result.GetKind(), result.GetName()) - return namespace, nil + return &resourceKey{gvr: mapping.Resource, name: obj.GetName()}, namespace, nil +} + +// matchesLabels checks if an unstructured object has all the given labels +func matchesLabels(obj *unstructured.Unstructured, labels map[string]string) bool { + if len(labels) == 0 { + return true + } + objLabels := obj.GetLabels() + for k, v := range labels { + if objLabels[k] != v { + return false + } + } + return true } -// applyDirectory applies all YAML files in a directory -func applyDirectory(ctx context.Context, k8sClient *client.Client, dirPath string) error { +// applyDirectory applies all YAML files in a directory and returns the set of applied resources. 
+func applyDirectory(ctx context.Context, k8sClient *client.Client, dirPath string, labels map[string]string) ([]resourceKey, error) { files, err := os.ReadDir(dirPath) if err != nil { - return fmt.Errorf("reading directory: %w", err) + return nil, fmt.Errorf("reading directory: %w", err) } var yamlFiles []os.DirEntry @@ -129,12 +156,78 @@ func applyDirectory(ctx context.Context, k8sClient *client.Client, dirPath strin return constants.GetKindPriority(kindI) < constants.GetKindPriority(kindJ) }) + var applied []resourceKey for _, file := range yamlFiles { manifestPath := filepath.Join(dirPath, file.Name()) - if _, err := applyManifest(ctx, k8sClient, manifestPath); err != nil { + key, _, err := applyManifest(ctx, k8sClient, manifestPath, labels) + if err != nil { logger.Warn("Failed to apply %s: %v", file.Name(), err) continue } + if key != nil { + applied = append(applied, *key) + } + } + + return applied, nil +} + +// pruneOrphans deletes namespaced resources in the given namespace that are owned +// by osmanage but are no longer present in the applied set. 
+func pruneOrphans(ctx context.Context, k8sClient *client.Client, namespace string, applied []resourceKey) error { + desired := make(map[resourceKey]bool, len(applied)) + for _, k := range applied { + desired[k] = true + } + + dynamicClient, err := k8sClient.Dynamic() + if err != nil { + return fmt.Errorf("getting dynamic client: %w", err) + } + + groupResources, err := k8sClient.APIGroupResources() + if err != nil { + return fmt.Errorf("getting API group resources: %w", err) + } + + for _, group := range groupResources { + for _, version := range group.Group.Versions { + for _, resource := range group.VersionedResources[version.Version] { + if !resource.Namespaced { + continue + } + + gvr := schema.GroupVersionResource{ + Group: group.Group.Name, + Version: version.Version, + Resource: resource.Name, + } + + list, err := dynamicClient.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + logger.Debug("Skipping %s: %v", gvr.Resource, err) + continue + } + + for _, item := range list.Items { + if desired[resourceKey{gvr: gvr, name: item.GetName()}] { + continue + } + for _, mf := range item.GetManagedFields() { + if mf.Manager != fieldManager { + continue + } + logger.Info("Pruning orphaned %s: %s", item.GetKind(), item.GetName()) + if err := dynamicClient.Resource(gvr).Namespace(namespace).Delete( + ctx, item.GetName(), metav1.DeleteOptions{}, + ); err != nil { + logger.Warn("Failed to prune %s/%s: %v", item.GetKind(), item.GetName(), err) + } + break + } + } + } + } } return nil diff --git a/internal/k8s/actions/cluster_status.go b/internal/k8s/actions/cluster_status.go index 7229125..8a151ef 100644 --- a/internal/k8s/actions/cluster_status.go +++ b/internal/k8s/actions/cluster_status.go @@ -53,7 +53,7 @@ func ClusterStatusCmd() *cobra.Command { ctx := context.Background() - status, err := checkClusterStatus(ctx, k8sClient) + status, err := CheckClusterStatus(ctx, k8sClient) if err != nil { return fmt.Errorf("checking cluster 
status: %w", err) } @@ -88,7 +88,7 @@ func ClusterStatusCmd() *cobra.Command { } // checkClusterStatus checks the overall cluster health -func checkClusterStatus(ctx context.Context, k8sClient *client.Client) (*ClusterStatus, error) { +func CheckClusterStatus(ctx context.Context, k8sClient *client.Client) (*ClusterStatus, error) { nodes, err := k8sClient.Clientset().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing nodes: %w", err) diff --git a/internal/k8s/actions/get_namespace_exists.go b/internal/k8s/actions/get_namespace_exists.go new file mode 100644 index 0000000..80013d4 --- /dev/null +++ b/internal/k8s/actions/get_namespace_exists.go @@ -0,0 +1,70 @@ +package actions + +import ( + "context" + "fmt" + "strings" + + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + GetNamespaceExistsHelp = "Returns true if namespace for given instance URL exists on cluster." + GetNamespaceExistsHelpExtra = `Will look for namespace with string derived by removing dots + from instance url string (i.e.: my.instance.url.org -> myinstanceurlorg) on the cluster. 
+ +Examples: + osmanage k8s get-namespace-exists my.instance.url.org --kubeconfig ~/.kube/config` +) + +// GetNamespaceExistsCmd creates the Cobra CLI command +func GetNamespaceExistsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get-namespace-exists <instance-url>", + Short: GetNamespaceExistsHelp, + Long: GetNamespaceExistsHelp + "\n\n" + GetNamespaceExistsHelpExtra, + Args: cobra.ExactArgs(1), + } + + kubeconfig := cmd.Flags().String("kubeconfig", "", "Path to kubeconfig file") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + instanceUrl := args[0] + + namespace := strings.ReplaceAll(instanceUrl, ".", "") + + k8sClient, err := client.New(*kubeconfig) + if err != nil { + return fmt.Errorf("creating k8s client: %w", err) + } + + ctx := context.Background() + exists, err := GetNamespaceExists(ctx, k8sClient.Clientset(), namespace) + if err != nil { + return err + } + + fmt.Println(exists) + return nil + } + + return cmd +} + +// GetNamespaceExists checks whether the given namespace exists on cluster. +// Returns error on failed client call.
+func GetNamespaceExists(ctx context.Context, k8sClient kubernetes.Interface, namespace string) (bool, error) { + _, err := k8sClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("getting namespace: %w", err) + } + return true, nil +} diff --git a/internal/k8s/actions/get_service_address.go b/internal/k8s/actions/get_service_address.go index 9afa79f..8f7f53e 100644 --- a/internal/k8s/actions/get_service_address.go +++ b/internal/k8s/actions/get_service_address.go @@ -3,12 +3,14 @@ package actions import ( "context" "fmt" + "strings" "github.com/OpenSlides/openslides-cli/internal/k8s/client" - "github.com/OpenSlides/openslides-cli/internal/utils" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) const ( @@ -16,13 +18,14 @@ const ( GetServiceAddressHelpExtra = `Returns the service ClusterIP:Port for the given instance and service name. 
Examples: - osmanage k8s get-service-address ./my.instance.dir.org backendmanage - osmanage k8s get-service-address ./my.instance.dir.org backendmanage --kubeconfig ~/.kube/config` + osmanage k8s get-service-address my.instance.url.org backendmanage + osmanage k8s get-service-address my.instance.url.org backendmanage --kubeconfig ~/.kube/config` ) +// GetServiceAddressCmd creates the Cobra CLI command func GetServiceAddressCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "get-service-address ", + Use: "get-service-address ", Short: GetServiceAddressHelp, Long: GetServiceAddressHelp + "\n\n" + GetServiceAddressHelpExtra, Args: cobra.ExactArgs(2), @@ -31,10 +34,10 @@ func GetServiceAddressCmd() *cobra.Command { kubeconfig := cmd.Flags().String("kubeconfig", "", "Path to kubeconfig file") cmd.RunE = func(cmd *cobra.Command, args []string) error { - instanceDir := args[0] + instanceUrl := args[0] serviceName := args[1] - namespace := utils.ExtractNamespace(instanceDir) + namespace := strings.ReplaceAll(instanceUrl, ".", "") k8sClient, err := client.New(*kubeconfig) if err != nil { @@ -42,25 +45,39 @@ func GetServiceAddressCmd() *cobra.Command { } ctx := context.Background() - svc, err := k8sClient.Clientset().CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) + address, err := GetServiceAddress(ctx, k8sClient.Clientset(), namespace, serviceName) if err != nil { - return fmt.Errorf("getting service %s: %w", serviceName, err) + return err } - if svc.Spec.ClusterIP == "" { - return fmt.Errorf("service %s has no ClusterIP", serviceName) - } + fmt.Println(address) + return nil + } + + return cmd +} - if len(svc.Spec.Ports) == 0 { - return fmt.Errorf("service %s has no ports", serviceName) +// GetServiceAddress retrieves the ClusterIP:Port address for a service in a namespace. +// Returns error if service doesn't exist or has invalid configuration. 
+func GetServiceAddress(ctx context.Context, k8sClient kubernetes.Interface, namespace, serviceName string) (string, error) { + svc, err := k8sClient.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return "", fmt.Errorf("service %s not found in namespace %s", serviceName, namespace) } + return "", fmt.Errorf("getting service: %w", err) + } - port := svc.Spec.Ports[0].Port - address := fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, port) + if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { + return "", fmt.Errorf("service %s has no ClusterIP", serviceName) + } - fmt.Println(address) - return nil + if len(svc.Spec.Ports) == 0 { + return "", fmt.Errorf("service %s has no ports", serviceName) } - return cmd + port := svc.Spec.Ports[0].Port + address := fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, port) + + return address, nil } diff --git a/internal/k8s/actions/health.go b/internal/k8s/actions/health.go index 06b2ce9..df8e242 100644 --- a/internal/k8s/actions/health.go +++ b/internal/k8s/actions/health.go @@ -4,11 +4,12 @@ import ( "context" "fmt" + "github.com/spf13/cobra" + "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/k8s/client" "github.com/OpenSlides/openslides-cli/internal/logger" "github.com/OpenSlides/openslides-cli/internal/utils" - "github.com/spf13/cobra" ) const ( @@ -34,6 +35,7 @@ func HealthCmd() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { logger.Info("=== K8S HEALTH CHECK ===") + instanceDir := args[0] namespace := utils.ExtractNamespace(instanceDir) logger.Debug("Namespace: %s", namespace) @@ -46,10 +48,22 @@ func HealthCmd() *cobra.Command { ctx := context.Background() if *wait { - return waitForInstanceHealthy(ctx, k8sClient, namespace, *timeout) + return WaitForInstanceHealthy(ctx, k8sClient, namespace, *timeout, nil) + } + + status, err := GetHealthStatus(ctx, k8sClient, namespace) + if 
err != nil { + return fmt.Errorf("getting health status: %w", err) + } + + printHealthStatus(namespace, status) + + if !status.Healthy { + return fmt.Errorf("instance is not healthy: %d/%d pods ready", status.Ready, status.Total) } - return checkHealth(ctx, k8sClient, namespace) + logger.Info("Instance is healthy") + return nil } return cmd diff --git a/internal/k8s/actions/health_check.go b/internal/k8s/actions/health_check.go index 74e57b1..e311ffc 100644 --- a/internal/k8s/actions/health_check.go +++ b/internal/k8s/actions/health_check.go @@ -11,7 +11,6 @@ import ( "github.com/OpenSlides/openslides-cli/internal/k8s/client" "github.com/OpenSlides/openslides-cli/internal/logger" "github.com/schollz/progressbar/v3" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -20,55 +19,111 @@ import ( // HealthStatus represents the health status of an instance type HealthStatus struct { - Healthy bool - Ready int - Total int - Pods []corev1.Pod + Healthy bool + Ready int + Total int + ActivePods int + Pods []corev1.Pod +} + +// DeploymentStatus represents the rollout status of a deployment +type DeploymentStatus struct { + Ready int + Desired int + Complete bool +} + +// shouldCountPod returns true if the pod should be counted toward instance health +// excludes completed, failed, and terminating pods +func shouldCountPod(pod *corev1.Pod) bool { + return pod.Status.Phase != corev1.PodSucceeded && + pod.Status.Phase != corev1.PodFailed && + pod.DeletionTimestamp == nil } -// getHealthStatus returns health metrics -func getHealthStatus(ctx context.Context, k8sClient *client.Client, namespace string) (*HealthStatus, error) { +// IsPodReady checks if a pod is ready based on pod status condition +func IsPodReady(pod *corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady { + return condition.Status == corev1.ConditionTrue + } + } + return false +} + +// GetHealthStatus returns 
instance pod health +func GetHealthStatus(ctx context.Context, k8sClient *client.Client, namespace string) (*HealthStatus, error) { pods, err := k8sClient.Clientset().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing pods: %w", err) } - var filteredPods []corev1.Pod - for _, pod := range pods.Items { - if pod.Status.Phase == corev1.PodSucceeded { - continue + deployments, err := k8sClient.Clientset().AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("listing deployments: %w", err) + } + + desiredTotal := 0 + for _, d := range deployments.Items { + if d.Spec.Replicas != nil { + desiredTotal += int(*d.Spec.Replicas) } - filteredPods = append(filteredPods, pod) } - total := len(filteredPods) - if total == 0 { - return &HealthStatus{ - Healthy: false, - Ready: 0, - Total: 0, - Pods: nil, - }, nil + var filteredPods []corev1.Pod + for _, pod := range pods.Items { + if shouldCountPod(&pod) { + filteredPods = append(filteredPods, pod) + } } ready := 0 for _, pod := range filteredPods { - if isPodReady(&pod) { + if IsPodReady(&pod) { ready++ } } - healthy := ready == total + total := desiredTotal + if total == 0 { + total = len(filteredPods) + } return &HealthStatus{ - Healthy: healthy, - Ready: ready, - Total: total, - Pods: filteredPods, + Healthy: ready == total, + Ready: ready, + Total: total, + ActivePods: len(filteredPods), + Pods: filteredPods, }, nil } -// Helper to print instance pod status +// pollUntil runs fn on every interval tick until fn returns done=true, fn returns +// an error, or the timeout is exceeded. 
+func pollUntil(ctx context.Context, interval, timeout time.Duration, fn func() (done bool, err error)) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + timeoutCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + select { + case <-ticker.C: + done, err := fn() + if err != nil { + return err + } + if done { + return nil + } + case <-timeoutCtx.Done(): + return fmt.Errorf("timeout: %w", timeoutCtx.Err()) + } + } +} + +// printHealthStatus prints pod-level health details to stdout. func printHealthStatus(namespace string, status *HealthStatus) { if status.Total == 0 { fmt.Printf("No pods found in namespace %s\n", namespace) @@ -76,13 +131,11 @@ func printHealthStatus(namespace string, status *HealthStatus) { } fmt.Printf("\nNamespace: %s\n", namespace) - fmt.Printf("Ready: %d/%d pods\n\n", status.Ready, status.Total) + fmt.Printf("Ready: %d/%d pods (active: %d)\n\n", status.Ready, status.Total, status.ActivePods) fmt.Println("Pod Status:") - for _, pod := range status.Pods { - ready := isPodReady(&pod) icon := constants.IconNotReady - if ready { + if IsPodReady(&pod) { icon = constants.IconReady } fmt.Printf(" %s %-50s %s\n", icon, pod.Name, pod.Status.Phase) @@ -90,148 +143,246 @@ func printHealthStatus(namespace string, status *HealthStatus) { fmt.Println() } -// checkHealth checks the current health status and prints details -func checkHealth(ctx context.Context, k8sClient *client.Client, namespace string) error { - status, err := getHealthStatus(ctx, k8sClient, namespace) - if err != nil { - return fmt.Errorf("getting health status: %w", err) - } - - printHealthStatus(namespace, status) - - if !status.Healthy { - return fmt.Errorf("instance is not healthy: %d/%d pods ready", status.Ready, status.Total) +// getNotReadyNames returns the names of pods that are not ready. 
+func getNotReadyNames(pods []corev1.Pod) []string { + var names []string + for _, pod := range pods { + if !IsPodReady(&pod) { + names = append(names, pod.Name) + } } - - logger.Info("Instance is healthy") - return nil + return names } -// waitForInstanceHealthy waits for instance to become healthy -func waitForInstanceHealthy(ctx context.Context, k8sClient *client.Client, namespace string, timeout time.Duration) error { - ticker := time.NewTicker(constants.TickerDuration) - defer ticker.Stop() - - timeoutCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() +// WaitForInstanceHealthy waits for an instance to become healthy. +// +// When callback is non-nil (gRPC mode), it is called on every tick with the +// current status and no progress bar is rendered. When callback is nil (CLI +// mode), a progress bar is written to stdout. +func WaitForInstanceHealthy( + ctx context.Context, + k8sClient *client.Client, + namespace string, + timeout time.Duration, + callback func(*HealthStatus) error, +) error { + var bar *progressbar.ProgressBar + if callback == nil { + initial, err := GetHealthStatus(ctx, k8sClient, namespace) + if err != nil { + return fmt.Errorf("getting initial health status: %w", err) + } + if initial.Total > 0 { + bar = createProgressBar(initial.Total, "Pods ready", constants.AddDetailLineBuffer) + } + } var lastStatus *HealthStatus - var bar *progressbar.ProgressBar - for { - select { - case <-ticker.C: - status, err := getHealthStatus(ctx, k8sClient, namespace) - if err != nil { - logger.Debug("Error checking health: %v", err) - continue - } - lastStatus = status + err := pollUntil(ctx, constants.TickerDuration, timeout, func() (bool, error) { + status, err := GetHealthStatus(ctx, k8sClient, namespace) + if err != nil { + logger.Debug("Error checking health: %v", err) + return false, nil + } + lastStatus = status + if callback != nil { + if err := callback(status); err != nil { + return false, err + } + } else { if bar == nil && 
status.Total > 0 { bar = createProgressBar(status.Total, "Pods ready", constants.AddDetailLineBuffer) - } else if bar != nil { - bar.ChangeMax(status.Total) } if bar != nil && !bar.IsFinished() { notReady := getNotReadyNames(status.Pods) + detail := "" if len(notReady) > 0 { - if err := bar.AddDetail(fmt.Sprintf("%s Pending: %s", constants.IconNotReady, strings.Join(notReady, ", "))); err != nil { - return fmt.Errorf("adding pending pods detail: %w", err) - } - } else { - if err := bar.AddDetail(""); err != nil { - return fmt.Errorf("adding empty detail: %w", err) - } + detail = fmt.Sprintf("%s Pending: %s", constants.IconNotReady, strings.Join(notReady, ", ")) + } + if err := bar.AddDetail(detail); err != nil { + return false, fmt.Errorf("updating progress bar detail: %w", err) } if err := bar.Set(status.Ready); err != nil { - return fmt.Errorf("setting progress bar: %w", err) + return false, fmt.Errorf("setting progress bar: %w", err) } } - if status.Healthy { - if bar != nil && !bar.IsFinished() { - if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) - } - } - logger.Info("Instance is healthy: %d/%d pods ready", status.Ready, status.Total) - return nil - } + } - case <-timeoutCtx.Done(): + if status.Healthy { if bar != nil && !bar.IsFinished() { if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) + return false, fmt.Errorf("finishing progress bar: %w", err) } } - logger.Warn("Timeout reached. Current status:") - if lastStatus != nil { - printHealthStatus(namespace, lastStatus) - } - return fmt.Errorf("timeout waiting for instance to become healthy") + logger.Info("Instance is healthy: %d/%d pods ready", status.Ready, status.Total) + return true, nil + } + return false, nil + }) + + if err != nil { + if bar != nil && !bar.IsFinished() { + _ = bar.Finish() } + logger.Warn("Timeout reached. 
Current status:") + if lastStatus != nil { + printHealthStatus(namespace, lastStatus) + } + return fmt.Errorf("timeout waiting for instance to become healthy") } + return nil } -func createProgressBar(max int, description string, maxDetailRow int) *progressbar.ProgressBar { - opts := []progressbar.Option{ - progressbar.OptionSetDescription(description), - progressbar.OptionSetWidth(constants.ProgressBarWidth), - progressbar.OptionSetWriter(os.Stdout), - progressbar.OptionSetMaxDetailRow(maxDetailRow), - progressbar.OptionSetTheme(progressbar.Theme{ - Saucer: constants.Saucer, - SaucerPadding: constants.SaucerPadding, - BarStart: constants.BarStart, - BarEnd: constants.BarEnd, - }), - progressbar.OptionThrottle(constants.ThrottleDuration), - progressbar.OptionOnCompletion(func() { - fmt.Println() - }), +// waitForDeploymentReady waits for a specific deployment rollout to complete. +func waitForDeploymentReady( + ctx context.Context, + k8sClient *client.Client, + namespace, deploymentName string, + timeout time.Duration, + callback func(*DeploymentStatus) error, +) error { + logger.Debug("Waiting for deployment %s to be ready (timeout: %v)", deploymentName, timeout) + + deployment, err := k8sClient.Clientset().AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("getting deployment %s: %w", deploymentName, err) } + desired := int(*deployment.Spec.Replicas) - if max > 0 { - opts = append(opts, progressbar.OptionShowCount()) - } else { - opts = append(opts, progressbar.OptionSpinnerType(constants.SpinnerType)) + var bar *progressbar.ProgressBar + if callback == nil && desired > 0 { + bar = createProgressBar(desired, fmt.Sprintf("Waiting for %s rollout", deploymentName), 0) } - return progressbar.NewOptions(max, opts...) 
-} + var lastDeployment *appsv1.Deployment -// isPodReady checks if a pod is ready -func isPodReady(pod *corev1.Pod) bool { - if pod.DeletionTimestamp != nil { - return false - } + err = pollUntil(ctx, constants.TickerDuration, timeout, func() (bool, error) { + d, err := k8sClient.Clientset().AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + if err != nil { + logger.Debug("Error getting deployment: %v", err) + return false, nil + } + lastDeployment = d - for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { - for _, container := range pod.Status.ContainerStatuses { - if !container.Ready { - return false + desired := int(*d.Spec.Replicas) + updated := int(d.Status.UpdatedReplicas) + ready := int(d.Status.ReadyReplicas) + available := int(d.Status.AvailableReplicas) + total := int(d.Status.Replicas) + + status := &DeploymentStatus{ + Ready: ready, + Desired: desired, + } + + complete := d.Status.ObservedGeneration >= d.Generation && + updated == desired && + available == desired && + ready == desired && + total == desired + + if callback != nil { + status.Complete = complete + if err := callback(status); err != nil { + return false, err + } + } else { + if bar != nil && !bar.IsFinished() { + if err := bar.Set(ready); err != nil { + return false, fmt.Errorf("setting progress bar: %w", err) } } - return true } - } - return false + logger.Debug("Deployment %s: %d/%d updated, %d/%d ready, %d total (generation: %d/%d)", + deploymentName, updated, desired, ready, desired, total, + d.Status.ObservedGeneration, d.Generation) + + if complete { + if bar != nil && !bar.IsFinished() { + if err := bar.Finish(); err != nil { + return false, fmt.Errorf("finishing progress bar: %w", err) + } + } + logger.Info("Deployment %s is ready with %d replicas", deploymentName, desired) + return true, nil + } + return false, nil + }) + + if err != nil { + if bar != nil && 
!bar.IsFinished() { + _ = bar.Finish() + } + logger.Warn("Timeout reached. Deployment status:") + if lastDeployment != nil { + printDeploymentStatus(namespace, deploymentName, lastDeployment) + } + return fmt.Errorf("timeout waiting for deployment %s rollout", deploymentName) + } + return nil } -// getNotReadyNames -func getNotReadyNames(pods []corev1.Pod) []string { - var names []string - for _, pod := range pods { - if !isPodReady(&pod) { - names = append(names, pod.Name) +// waitForNamespaceDeletion waits for a namespace to be completely deleted. +func waitForNamespaceDeletion( + ctx context.Context, + k8sClient *client.Client, + namespace string, + timeout time.Duration, + callback func(elapsedSeconds int) error, +) error { + clientset := k8sClient.Clientset() + + var bar *progressbar.ProgressBar + if callback == nil { + bar = createProgressBar(-1, fmt.Sprintf("Stopping %s", namespace), 0) + } + + startTime := time.Now() + + err := pollUntil(ctx, constants.TickerDuration, timeout, func() (bool, error) { + elapsed := int(time.Since(startTime).Seconds()) + + if bar != nil { + _ = bar.Add(1) + } + + if callback != nil { + if err := callback(elapsed); err != nil { + return false, err + } + } + + _, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + if !errors.IsNotFound(err) { + logger.Warn("Error checking namespace: %v", err) + return false, nil + } + if bar != nil { + if err := bar.Finish(); err != nil { + return false, fmt.Errorf("finishing progress bar: %w", err) + } + } + logger.Debug("Namespace %s successfully deleted", namespace) + return true, nil } + logger.Debug("Namespace %s still terminating...", namespace) + return false, nil + }) + + if err != nil { + if bar != nil { + _ = bar.Finish() + } + return fmt.Errorf("timeout waiting for namespace %s to be deleted", namespace) } - return names + return nil } -// namespaceIsActive checks if a namespace exists and is active +// namespaceIsActive checks if a 
namespace exists and is active. func namespaceIsActive(ctx context.Context, k8sClient *client.Client, namespace string) (bool, error) { ns, err := k8sClient.Clientset().CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { @@ -240,11 +391,10 @@ func namespaceIsActive(ctx context.Context, k8sClient *client.Client, namespace } return false, fmt.Errorf("getting namespace: %w", err) } - return ns.Status.Phase == corev1.NamespaceActive, nil } -// Helper to print deployment status +// printDeploymentStatus prints deployment rollout details to stdout. func printDeploymentStatus(namespace, name string, deployment *appsv1.Deployment) { fmt.Printf("\nDeployment: %s (namespace: %s)\n", name, namespace) fmt.Printf("Generation: %d/%d (observed/current)\n", @@ -270,117 +420,27 @@ func printDeploymentStatus(namespace, name string, deployment *appsv1.Deployment fmt.Println() } -// waitForDeploymentReady waits for a specific deployment to be ready -func waitForDeploymentReady(ctx context.Context, k8sClient *client.Client, namespace, deploymentName string, timeout time.Duration) error { - logger.Debug("Waiting for deployment %s to be ready (timeout: %v)", deploymentName, timeout) - - ticker := time.NewTicker(constants.TickerDuration) - defer ticker.Stop() - - timeoutCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - var lastDeployment *appsv1.Deployment - var bar *progressbar.ProgressBar - - for { - select { - case <-ticker.C: - deployment, err := k8sClient.Clientset().AppsV1().Deployments(namespace).Get(timeoutCtx, deploymentName, metav1.GetOptions{}) - if err != nil { - logger.Debug("Error getting deployment: %v", err) - continue - } - - lastDeployment = deployment - - desired := int(*deployment.Spec.Replicas) - updated := int(deployment.Status.UpdatedReplicas) - ready := int(deployment.Status.ReadyReplicas) - available := int(deployment.Status.AvailableReplicas) - total := int(deployment.Status.Replicas) - observedGen := 
deployment.Status.ObservedGeneration - gen := deployment.Generation - - if bar == nil && desired > 0 { - bar = createProgressBar(-1, fmt.Sprintf("Waiting for %s deployment rollout", deploymentName), 0) - } - - if bar != nil { - _ = bar.Add(1) - } - - if observedGen >= gen && - updated == desired && - available == desired && - ready == desired && - total == desired { - if bar != nil { - if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) - } - } - logger.Info("Deployment %s is ready with %d replicas", deploymentName, desired) - return nil - } - - logger.Debug("Deployment %s: %d/%d updated, %d/%d ready, %d total (generation: %d/%d)", - deploymentName, - updated, desired, - ready, desired, - total, - observedGen, gen) - - case <-timeoutCtx.Done(): - if bar != nil { - if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) - } - } - logger.Warn("Timeout reached. Deployment status:") - if lastDeployment != nil { - printDeploymentStatus(namespace, deploymentName, lastDeployment) - } - - return fmt.Errorf("timeout waiting for deployment %s rollout", deploymentName) - } +func createProgressBar(max int, description string, maxDetailRow int) *progressbar.ProgressBar { + opts := []progressbar.Option{ + progressbar.OptionSetDescription(description), + progressbar.OptionSetWidth(constants.ProgressBarWidth), + progressbar.OptionSetWriter(os.Stdout), + progressbar.OptionSetMaxDetailRow(maxDetailRow), + progressbar.OptionSetTheme(progressbar.Theme{ + Saucer: constants.Saucer, + SaucerPadding: constants.SaucerPadding, + BarStart: constants.BarStart, + BarEnd: constants.BarEnd, + }), + progressbar.OptionThrottle(constants.ThrottleDuration), + progressbar.OptionOnCompletion(func() { + fmt.Println() + }), } -} - -// waitForNamespaceDeletion waits for a namespace to be completely deleted -func waitForNamespaceDeletion(ctx context.Context, k8sClient *client.Client, namespace string, timeout time.Duration) error 
{ - clientset := k8sClient.Clientset() - ticker := time.NewTicker(constants.TickerDuration) - defer ticker.Stop() - - timeoutCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - bar := createProgressBar(-1, fmt.Sprintf("Stopping %s", namespace), 0) - - for { - select { - case <-ticker.C: - _ = bar.Add(1) - _, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) - if err != nil { - if !errors.IsNotFound(err) { - logger.Warn("Error checking namespace: %v", err) - continue - } - if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) - } - logger.Debug("Namespace %s successfully deleted", namespace) - return nil - } - logger.Debug("Namespace %s still terminating...", namespace) - - case <-timeoutCtx.Done(): - if err := bar.Finish(); err != nil { - return fmt.Errorf("finishing progress bar: %w", err) - } - return fmt.Errorf("timeout waiting for namespace %s to be deleted", namespace) - } + if max > 0 { + opts = append(opts, progressbar.OptionShowCount()) + } else { + opts = append(opts, progressbar.OptionSpinnerType(constants.SpinnerType)) } + return progressbar.NewOptions(max, opts...) 
} diff --git a/internal/k8s/actions/health_check_test.go b/internal/k8s/actions/health_check_test.go index 235752c..bea67f2 100644 --- a/internal/k8s/actions/health_check_test.go +++ b/internal/k8s/actions/health_check_test.go @@ -14,7 +14,7 @@ func TestIsPodReady_Ready(t *testing.T) { }, }, } - if !isPodReady(pod) { + if !IsPodReady(pod) { t.Error("Expected pod to be ready") } } @@ -27,7 +27,7 @@ func TestIsPodReady_NotReady(t *testing.T) { }, }, } - if isPodReady(pod) { + if IsPodReady(pod) { t.Error("Expected pod to not be ready") } } @@ -36,7 +36,7 @@ func TestIsPodReady_NoCondition(t *testing.T) { pod := &corev1.Pod{ Status: corev1.PodStatus{Conditions: []corev1.PodCondition{}}, } - if isPodReady(pod) { + if IsPodReady(pod) { t.Error("Expected pod to not be ready when no Ready condition exists") } } @@ -50,7 +50,7 @@ func TestIsPodReady_MultipleConditions(t *testing.T) { }, }, } - if !isPodReady(pod) { + if !IsPodReady(pod) { t.Error("Expected pod to be ready even with multiple conditions") } } diff --git a/internal/k8s/actions/instance_status.go b/internal/k8s/actions/instance_status.go new file mode 100644 index 0000000..a75f0b1 --- /dev/null +++ b/internal/k8s/actions/instance_status.go @@ -0,0 +1,180 @@ +package actions + +import ( + "context" + "fmt" + "strings" + + "os" + "text/tabwriter" + + "github.com/OpenSlides/openslides-cli/internal/k8s/client" + "github.com/OpenSlides/openslides-cli/internal/logger" + "github.com/OpenSlides/openslides-cli/internal/utils" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + StatusHelp = "Get current status of an OpenSlides instance" + StatusHelpExtra = `Prints pod and container status for a running OpenSlides instance. 
+ +Examples: + osmanage k8s status ./my.instance.dir.org + osmanage k8s status ./my.instance.dir.org --kubeconfig ~/.kube/config` +) + +func GetInstanceStatusCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "status ", + Short: StatusHelp, + Long: StatusHelp + "\n\n" + StatusHelpExtra, + Args: cobra.ExactArgs(1), + } + + kubeconfig := cmd.Flags().String("kubeconfig", "", "Path to kubeconfig file") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + logger.Info("=== K8S INSTANCE STATUS ===") + instanceDir := args[0] + namespace := utils.ExtractNamespace(instanceDir) + + k8sClient, err := client.New(*kubeconfig) + if err != nil { + return fmt.Errorf("creating k8s client: %w", err) + } + + status, err := GetInstanceStatus(context.Background(), k8sClient, namespace) + if err != nil { + return fmt.Errorf("getting instance status: %w", err) + } + + if !status.NamespaceExists { + fmt.Println("Unavailable") + return nil + } + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + if _, err := fmt.Fprintln(w, "NAMESPACE\tSERVICE\tPOD\tREGISTRY\tTAG\tREADY\tSTARTED\tNODE"); err != nil { + return fmt.Errorf("writing header: %w", err) + } + for _, pod := range status.Pods { + for _, container := range pod.Containers { + registry := container.ContainerRegistry + if idx := strings.LastIndex(registry, "/"); idx != -1 { + registry = registry[:idx] + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%v\t%v\t%s\n", + namespace, + pod.Service, + pod.Name, + registry, + container.Tag, + container.Ready, + container.Started, + pod.Node, + ); err != nil { + return fmt.Errorf("writing row: %w", err) + } + } + } + if err := w.Flush(); err != nil { + return fmt.Errorf("flushing output: %w", err) + } + return nil + } + + return cmd +} + +type ContainerStatus struct { + Name string + Tag string + ContainerRegistry string + Ready bool + Started bool + EnvVars map[string]string +} + +type PodStatus struct { + Name string + Service string + Node string + Containers 
[]ContainerStatus +} + +type InstanceStatus struct { + NamespaceExists bool + Pods []PodStatus + ServiceCounts map[string]int32 +} + +func GetInstanceStatus(ctx context.Context, k8sClient *client.Client, namespace string) (*InstanceStatus, error) { + clientset := k8sClient.Clientset() + + // check namespace exists + _, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + return &InstanceStatus{NamespaceExists: false}, nil + } + + podList, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("listing pods: %w", err) + } + + var pods []PodStatus + serviceCounts := make(map[string]int32) + + for _, pod := range podList.Items { + // skip terminating pods + if pod.DeletionTimestamp != nil { + continue + } + + service := pod.Labels["osinstance/service"] + serviceCounts[service]++ + + var containers []ContainerStatus + for _, cs := range pod.Status.ContainerStatuses { + if cs.Name == "redis" { + continue + } + registryService, tag, _ := strings.Cut(cs.Image, ":") + registry := registryService[:strings.LastIndex(registryService, "/")] + + envVars := make(map[string]string) + for _, spec := range pod.Spec.Containers { + if spec.Name != cs.Name { + continue + } + for _, env := range spec.Env { + envVars[env.Name] = env.Value + } + } + + started := cs.Started != nil && *cs.Started + containers = append(containers, ContainerStatus{ + Name: cs.Name, + Tag: tag, + ContainerRegistry: registry, + Ready: cs.Ready, + Started: started, + EnvVars: envVars, + }) + } + + pods = append(pods, PodStatus{ + Name: pod.Name, + Service: service, + Node: pod.Spec.NodeName, + Containers: containers, + }) + } + + return &InstanceStatus{ + NamespaceExists: true, + Pods: pods, + ServiceCounts: serviceCounts, + }, nil +} diff --git a/internal/k8s/actions/scale.go b/internal/k8s/actions/scale.go index a995b22..c8312b1 100644 --- a/internal/k8s/actions/scale.go +++ 
b/internal/k8s/actions/scale.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" "strings" + "time" "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/k8s/client" @@ -48,40 +49,45 @@ func ScaleCmd() *cobra.Command { logger.Info("=== K8S SCALE SERVICE ===") instanceDir := args[0] logger.Debug("Instance directory: %s", instanceDir) - logger.Info("Service: %s", *service) - - namespace := utils.ExtractNamespace(instanceDir) - logger.Info("Namespace: %s", namespace) k8sClient, err := client.New(*kubeconfig) if err != nil { return fmt.Errorf("creating k8s client: %w", err) } - ctx := context.Background() + if err := ScaleService(context.Background(), k8sClient, *service, instanceDir, *skipReadyCheck, *timeout, nil); err != nil { + return err + } - // Construct path to deployment file - deploymentFile := fmt.Sprintf(constants.DeploymentFileTemplate, *service) - deploymentPath := filepath.Join(instanceDir, constants.StackDirName, deploymentFile) + logger.Info("%s service scaled successfully", *service) + return nil + } - logger.Info("Applying deployment manifest: %s", deploymentPath) - if _, err := applyManifest(ctx, k8sClient, deploymentPath); err != nil { - return fmt.Errorf("applying deployment: %w", err) - } + return cmd +} - if *skipReadyCheck { - logger.Info("Skipping ready check") - return nil - } +// ScaleService applies the deployment manifest for a service and optionally waits for rollout. 
+func ScaleService(ctx context.Context, k8sClient *client.Client, service, instanceDir string, skipReadyCheck bool, timeout time.Duration, callback func(*DeploymentStatus) error) error { + namespace := utils.ExtractNamespace(instanceDir) + logger.Info("Service: %s", service) + logger.Info("Namespace: %s", namespace) - // Wait for the specific deployment (OpenSlides service name is deployment name) - if err := waitForDeploymentReady(ctx, k8sClient, namespace, *service, *timeout); err != nil { - return fmt.Errorf("waiting for deployment ready: %w", err) - } + deploymentFile := fmt.Sprintf(constants.DeploymentFileTemplate, service) + deploymentPath := filepath.Join(instanceDir, constants.StackDirName, deploymentFile) - logger.Info("%s service scaled successfully", *service) + logger.Info("Applying deployment manifest: %s", deploymentPath) + if _, _, err := applyManifest(ctx, k8sClient, deploymentPath, nil); err != nil { + return fmt.Errorf("applying deployment: %w", err) + } + + if skipReadyCheck { + logger.Info("Skipping ready check") return nil } - return cmd + if err := waitForDeploymentReady(ctx, k8sClient, namespace, service, timeout, callback); err != nil { + return fmt.Errorf("waiting for deployment ready: %w", err) + } + + return nil } diff --git a/internal/k8s/actions/start.go b/internal/k8s/actions/start.go index 0d1b643..463c76d 100644 --- a/internal/k8s/actions/start.go +++ b/internal/k8s/actions/start.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "time" "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/k8s/client" @@ -19,7 +20,8 @@ const ( Examples: osmanage k8s start ./my.instance.dir.org osmanage k8s start ./my.instance.dir.org --skip-ready-check - osmanage k8s start ./my.instance.dir.org --kubeconfig ~/.kube/config --timeout 30s` + osmanage k8s start ./my.instance.dir.org --kubeconfig ~/.kube/config --timeout 30s + osmanage k8s start ./my.instance.dir.org --labels 
osinstance/examplelabel=true,osinstance/examplelabel2=10` ) func StartCmd() *cobra.Command { @@ -33,6 +35,7 @@ func StartCmd() *cobra.Command { kubeconfig := cmd.Flags().String("kubeconfig", "", "Path to kubeconfig file") skipReadyCheck := cmd.Flags().Bool("skip-ready-check", false, "Skip waiting for instance to become ready") timeout := cmd.Flags().Duration("timeout", constants.DefaultInstanceTimeout, "Timeout for instance health check") + labels := cmd.Flags().StringToString("labels", nil, "Label selector to filter resources, e.g. 'osinstance/migrate=true'") cmd.RunE = func(cmd *cobra.Command, args []string) error { logger.Info("=== K8S START INSTANCE ===") @@ -44,45 +47,54 @@ func StartCmd() *cobra.Command { return fmt.Errorf("creating k8s client: %w", err) } - ctx := context.Background() - - namespacePath := filepath.Join(instanceDir, constants.NamespaceYAML) - namespace, err := applyManifest(ctx, k8sClient, namespacePath) - if err != nil { - return fmt.Errorf("applying namespace: %w", err) + if err := StartInstance(context.Background(), k8sClient, instanceDir, *skipReadyCheck, *timeout, *labels, nil); err != nil { + return err } - logger.Info("Applied namespace: %s", namespace) - tlsSecretPath := filepath.Join(instanceDir, constants.SecretsDirName, constants.TlsCertSecretYAML) - tlsExists, err := utils.FileExists(tlsSecretPath) - if err != nil { - return fmt.Errorf("checking tls secret path %s: %w", tlsSecretPath, err) - } - if tlsExists { - logger.Info("Found and applying %s", tlsSecretPath) - if _, err := applyManifest(ctx, k8sClient, tlsSecretPath); err != nil { - return fmt.Errorf("applying TLS secret: %w", err) - } - } + logger.Info("Instance started successfully") + return nil + } - stackDir := filepath.Join(instanceDir, constants.StackDirName) - logger.Info("Applying stack manifests from: %s", stackDir) - if err := applyDirectory(ctx, k8sClient, stackDir); err != nil { - return fmt.Errorf("applying stack: %w", err) - } + return cmd +} - if 
*skipReadyCheck { - logger.Info("Skipping ready check") - return nil - } +// StartInstance applies namespace, optional TLS secret, and stack manifests, +// then optionally waits for all pods to become healthy. +func StartInstance(ctx context.Context, k8sClient *client.Client, instanceDir string, skipReadyCheck bool, timeout time.Duration, labels map[string]string, callback func(*HealthStatus) error) error { + namespacePath := filepath.Join(instanceDir, constants.NamespaceYAML) + _, namespace, err := applyManifest(ctx, k8sClient, namespacePath, nil) + if err != nil { + return fmt.Errorf("applying namespace: %w", err) + } + logger.Info("Applied namespace: %s", namespace) - if err := waitForInstanceHealthy(ctx, k8sClient, namespace, *timeout); err != nil { - return fmt.Errorf("waiting for ready: %w", err) + tlsSecretPath := filepath.Join(instanceDir, constants.SecretsDirName, constants.TlsCertSecretYAML) + tlsExists, err := utils.FileExists(tlsSecretPath) + if err != nil { + return fmt.Errorf("checking tls secret path %s: %w", tlsSecretPath, err) + } + if tlsExists { + logger.Info("Found and applying %s", tlsSecretPath) + if _, _, err := applyManifest(ctx, k8sClient, tlsSecretPath, nil); err != nil { + return fmt.Errorf("applying TLS secret: %w", err) } + } - logger.Info("Instance started successfully") + stackDir := filepath.Join(instanceDir, constants.StackDirName) + logger.Info("Applying stack manifests from: %s", stackDir) + if _, err := applyDirectory(ctx, k8sClient, stackDir, labels); err != nil { + return fmt.Errorf("applying stack: %w", err) + } + + if skipReadyCheck { + logger.Info("Skipping ready check") return nil } - return cmd + logger.Info("Waiting for instance to become ready...") + if err := WaitForInstanceHealthy(ctx, k8sClient, namespace, timeout, callback); err != nil { + return fmt.Errorf("waiting for ready: %w", err) + } + + return nil } diff --git a/internal/k8s/actions/stop.go b/internal/k8s/actions/stop.go index b46a7ce..df98a8a 100644 --- 
a/internal/k8s/actions/stop.go +++ b/internal/k8s/actions/stop.go @@ -48,16 +48,8 @@ func StopCmd() *cobra.Command { return fmt.Errorf("creating k8s client: %w", err) } - ctx := context.Background() - - namespace := utils.ExtractNamespace(instanceDir) - if err := saveTLSSecret(ctx, k8sClient, namespace, instanceDir); err != nil { - logger.Warn("Failed to save TLS secret: %v", err) - } - - logger.Info("Stopping instance: %s", namespace) - if err := deleteNamespace(ctx, k8sClient, namespace, *timeout); err != nil { - return fmt.Errorf("deleting namespace: %w", err) + if err := StopInstance(context.Background(), k8sClient, instanceDir, *timeout, nil); err != nil { + return err } logger.Info("Instance stopped successfully") @@ -67,6 +59,23 @@ func StopCmd() *cobra.Command { return cmd } +// StopInstance saves the TLS secret if present, then deletes the namespace +// and waits for it to be fully removed. +func StopInstance(ctx context.Context, k8sClient *client.Client, instanceDir string, timeout time.Duration, callback func(elapsedSeconds int) error) error { + namespace := utils.ExtractNamespace(instanceDir) + + if err := saveTLSSecret(ctx, k8sClient, namespace, instanceDir); err != nil { + logger.Warn("Failed to save TLS secret: %v", err) + } + + logger.Info("Stopping instance: %s", namespace) + if err := deleteNamespace(ctx, k8sClient, namespace, timeout, callback); err != nil { + return fmt.Errorf("deleting namespace: %w", err) + } + + return nil +} + // saveTLSSecret saves the TLS certificate secret to a YAML file if it exists func saveTLSSecret(ctx context.Context, k8sClient *client.Client, namespace, instanceDir string) error { clientset := k8sClient.Clientset() @@ -97,7 +106,7 @@ func saveTLSSecret(ctx context.Context, k8sClient *client.Client, namespace, ins } // deleteNamespace deletes a Kubernetes namespace -func deleteNamespace(ctx context.Context, k8sClient *client.Client, namespace string, timeout time.Duration) error { +func deleteNamespace(ctx 
context.Context, k8sClient *client.Client, namespace string, timeout time.Duration, callback func(elapsedSeconds int) error) error { clientset := k8sClient.Clientset() logger.Debug("Deleting namespace: %s", namespace) @@ -113,5 +122,5 @@ func deleteNamespace(ctx context.Context, k8sClient *client.Client, namespace st logger.Info("Namespace %s deletion initiated", namespace) logger.Debug("Waiting for namespace to be fully deleted...") - return waitForNamespaceDeletion(ctx, k8sClient, namespace, timeout) + return waitForNamespaceDeletion(ctx, k8sClient, namespace, timeout, callback) } diff --git a/internal/k8s/actions/update_backendmanage.go b/internal/k8s/actions/update_backendmanage.go index f5e143c..4790b12 100644 --- a/internal/k8s/actions/update_backendmanage.go +++ b/internal/k8s/actions/update_backendmanage.go @@ -9,7 +9,6 @@ import ( "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/k8s/client" "github.com/OpenSlides/openslides-cli/internal/logger" - "github.com/OpenSlides/openslides-cli/internal/utils" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -20,9 +19,8 @@ const ( UpdateBackendmanageHelpExtra = `Updates the backendmanage service deployment image tag and registry to new version. 
Examples: - osmanage k8s update-backendmanage ./my.instance.dir.org --kubeconfig ~/.kube/config --tag 4.2.23 --container-registry myRegistry - osmanage k8s update-backendmanage ./my.instance.dir.org --tag 4.2.23 --container-registry myRegistry --timeout 30s - osmanage k8s update-backendmanage ./my.instance.dir.org --tag 4.2.23 --container-registry myRegistry --revert --timeout 30s` + osmanage k8s update-backendmanage my.instance.url.org --kubeconfig ~/.kube/config --tag 4.2.23 --container-registry myRegistry + osmanage k8s update-backendmanage my.instance.url.org --tag 4.2.23 --container-registry myRegistry --timeout 30s` ) func UpdateBackendmanageCmd() *cobra.Command { @@ -36,7 +34,6 @@ func UpdateBackendmanageCmd() *cobra.Command { tag := cmd.Flags().StringP("tag", "t", "", "Image tag (required)") containerRegistry := cmd.Flags().String("container-registry", "", "Container registry (required)") kubeconfig := cmd.Flags().String("kubeconfig", "", "Path to kubeconfig file") - revert := cmd.Flags().Bool("revert", false, "Changes image back with given tag and registry") timeout := cmd.Flags().Duration("timeout", constants.DefaultDeploymentTimeout, "Timeout for deployment rollout check") _ = cmd.MarkFlagRequired("tag") @@ -50,39 +47,35 @@ func UpdateBackendmanageCmd() *cobra.Command { return fmt.Errorf("--container-registry cannot be empty") } - logger.Info("=== K8S UPDATE/REVERT BACKENDMANAGE ===") - instanceDir := args[0] - namespace := utils.ExtractNamespace(instanceDir) - - logger.Info("Namespace: %s", namespace) + logger.Info("=== K8S UPDATE BACKENDMANAGE ===") + instanceUrl := args[0] k8sClient, err := client.New(*kubeconfig) if err != nil { return fmt.Errorf("creating k8s client: %w", err) } - ctx := context.Background() - - if *revert { - if err := revertBackendmanage(ctx, k8sClient, namespace, *tag, *containerRegistry, *timeout); err != nil { - return err - } - - logger.Info("Successfully reverted backendmanage") - } else { - if err := updateBackendmanage(ctx, 
k8sClient, namespace, *tag, *containerRegistry, *timeout); err != nil { - return err - } - - logger.Info("Successfully updated backendmanage") + if err := UpdateBackendmanage(context.Background(), k8sClient, instanceUrl, *tag, *containerRegistry, *timeout, nil); err != nil { + return err } + + logger.Info("Successfully updated backendmanage") return nil } return cmd } -func updateBackendmanage(ctx context.Context, k8sClient *client.Client, namespace, tag, containerRegistry string, timeout time.Duration) error { +// UpdateBackendmanage updates or reverts the backendmanage deployment image and waits for rollout. +// When revert is true the operation is logged as a revert, otherwise as an update. +func UpdateBackendmanage( + ctx context.Context, + k8sClient *client.Client, + instanceUrl, tag, containerRegistry string, + timeout time.Duration, + callback func(*DeploymentStatus) error, +) error { + namespace := strings.ReplaceAll(instanceUrl, ".", "") image := fmt.Sprintf(constants.BackendmanageImageTemplate, containerRegistry, tag) logger.Info("Updating deployment to image: %s", image) @@ -101,37 +94,9 @@ func updateBackendmanage(ctx context.Context, k8sClient *client.Client, namespac } logger.Info("Patch applied (generation: %d)", updated.Generation) - logger.Info("Waiting for rollout to complete...") - if err := waitForDeploymentReady(ctx, k8sClient, namespace, constants.BackendmanageDeploymentName, timeout); err != nil { - return fmt.Errorf("rollout failed: %w", err) - } - - return nil -} - -func revertBackendmanage(ctx context.Context, k8sClient *client.Client, namespace, tag, containerRegistry string, timeout time.Duration) error { - image := fmt.Sprintf(constants.BackendmanageImageTemplate, containerRegistry, tag) - - logger.Info("Reverting deployment to image: %s", image) - - patch := fmt.Appendf(nil, constants.BackendmanagePatchTemplate, constants.BackendmanageContainerName, image) - updated, err := k8sClient.Clientset().AppsV1().Deployments(namespace).Patch( - 
ctx, - constants.BackendmanageDeploymentName, - types.StrategicMergePatchType, - patch, - metav1.PatchOptions{}, - ) - if err != nil { - return fmt.Errorf("patching deployment: %w", err) - } - - logger.Info("Patch applied (generation: %d)", updated.Generation) - - logger.Info("Waiting for rollout to complete...") - if err := waitForDeploymentReady(ctx, k8sClient, namespace, constants.BackendmanageDeploymentName, timeout); err != nil { + if err := waitForDeploymentReady(ctx, k8sClient, namespace, constants.BackendmanageDeploymentName, timeout, callback); err != nil { return fmt.Errorf("rollout failed: %w", err) } diff --git a/internal/k8s/actions/update_instance.go b/internal/k8s/actions/update_instance.go index 1ebed7e..d04e2d0 100644 --- a/internal/k8s/actions/update_instance.go +++ b/internal/k8s/actions/update_instance.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "time" "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/k8s/client" @@ -40,49 +41,74 @@ func UpdateInstanceCmd() *cobra.Command { logger.Debug("Instance directory: %s", instanceDir) - namespace := utils.ExtractNamespace(instanceDir) - logger.Info("Namespace: %s", namespace) - k8sClient, err := client.New(*kubeconfig) if err != nil { return fmt.Errorf("creating k8s client: %w", err) } - ctx := context.Background() - - isActive, err := namespaceIsActive(ctx, k8sClient, namespace) - if err != nil { - return fmt.Errorf("checking namespace: %w", err) + if err := UpdateInstance(context.Background(), k8sClient, instanceDir, *skipReadyCheck, *timeout, nil, nil); err != nil { + return err } - if !isActive { - logger.Info("%s is not running.", namespace) - logger.Info("The configuration has been updated and the instance will be upgraded upon its next start.") - logger.Info("Note that the next start might take a long time due to pending migrations.") - logger.Info("Consider starting the instance and running migrations now.") - 
logger.Info("Alternatively, downgrade for now and run migrations in the background once the instance is started.") - return nil - } + logger.Info("Instance updated successfully") + return nil + } - logger.Info("Updating OpenSlides services.") + return cmd +} - stackDir := filepath.Join(instanceDir, constants.StackDirName) - if err := applyDirectory(ctx, k8sClient, stackDir); err != nil { - return fmt.Errorf("applying stack: %w", err) - } +// UpdateInstance applies new stack manifests and optionally waits for instance +// to become healthy. Returns early with inactive=true if the namespace is not running. +func UpdateInstance( + ctx context.Context, + k8sClient *client.Client, + instanceDir string, + skipReadyCheck bool, + timeout time.Duration, + callback func(*HealthStatus) error, + inactiveCallback func() error, +) error { + namespace := utils.ExtractNamespace(instanceDir) + logger.Info("Namespace: %s", namespace) + + isActive, err := namespaceIsActive(ctx, k8sClient, namespace) + if err != nil { + return fmt.Errorf("checking namespace: %w", err) + } - if *skipReadyCheck { - logger.Info("Skip ready check.") - return nil + if !isActive { + logger.Info("%s is not running.", namespace) + logger.Info("The configuration has been updated and the instance will be upgraded upon its next start.") + logger.Info("Note that the next start might take a long time due to pending migrations.") + logger.Info("Consider starting the instance and running migrations now.") + logger.Info("Alternatively, downgrade for now and run migrations in the background once the instance is started.") + if inactiveCallback != nil { + return inactiveCallback() } + return nil + } - if err := waitForInstanceHealthy(ctx, k8sClient, namespace, *timeout); err != nil { - return fmt.Errorf("waiting for instance health: %w", err) - } + logger.Info("Updating OpenSlides services.") - logger.Info("Instance updated successfully") + stackDir := filepath.Join(instanceDir, constants.StackDirName) + applied, err 
:= applyDirectory(ctx, k8sClient, stackDir, nil) + if err != nil { + return fmt.Errorf("applying stack: %w", err) + } + + if err := pruneOrphans(ctx, k8sClient, namespace, applied); err != nil { + logger.Warn("Failed to prune orphaned resources: %v", err) + } + + if skipReadyCheck { + logger.Info("Skip ready check.") return nil } - return cmd + logger.Info("Waiting for instance to become ready...") + if err := WaitForInstanceHealthy(ctx, k8sClient, namespace, timeout, callback); err != nil { + return fmt.Errorf("waiting for instance health: %w", err) + } + + return nil } diff --git a/internal/k8s/client/client.go b/internal/k8s/client/client.go index c3323a0..6a085a2 100644 --- a/internal/k8s/client/client.go +++ b/internal/k8s/client/client.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/OpenSlides/openslides-cli/internal/logger" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -28,6 +27,8 @@ type Client struct { restMapper meta.RESTMapper mapperOnce sync.Once mapperErr error + + apiGroupResources []*restmapper.APIGroupResources } // New creates a Kubernetes client from the given kubeconfig path. @@ -87,8 +88,8 @@ func getDefaultKubeconfigPath() string { return filepath.Join( home, - clientcmd.RecommendedHomeDir, // ".kube" - clientcmd.RecommendedFileName, // "config" + clientcmd.RecommendedHomeDir, + clientcmd.RecommendedFileName, ) } @@ -125,9 +126,19 @@ func (c *Client) RESTMapper() (meta.RESTMapper, error) { return } + c.apiGroupResources = apiGroupResources c.restMapper = restmapper.NewDiscoveryRESTMapper(apiGroupResources) logger.Debug("REST mapper initialized") }) return c.restMapper, c.mapperErr } + +// APIGroupResources returns the cached API group resources, initializing the +// REST mapper if needed. Used for iterating over all known resource types. 
+func (c *Client) APIGroupResources() ([]*restmapper.APIGroupResources, error) { + if _, err := c.RESTMapper(); err != nil { + return nil, err + } + return c.apiGroupResources, nil +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go index eaa3c96..deb8028 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -38,9 +38,14 @@ func New(levelStr string) (*Logger, error) { return nil, fmt.Errorf("invalid log level: %s", levelStr) } + flags := log.LstdFlags + if os.Getenv("INVOCATION_ID") != "" { // if used as systemd service + flags = 0 + } + return &Logger{ level: level, - logger: log.New(os.Stderr, "", log.LstdFlags), + logger: log.New(os.Stderr, "", flags), }, nil } diff --git a/internal/manage/actions/get/get.go b/internal/manage/actions/get/get.go index 070b723..5918dbd 100644 --- a/internal/manage/actions/get/get.go +++ b/internal/manage/actions/get/get.go @@ -1,7 +1,6 @@ package get import ( - "cmp" "context" "encoding/json" "fmt" @@ -17,7 +16,7 @@ import ( "github.com/OpenSlides/openslides-cli/internal/constants" "github.com/OpenSlides/openslides-cli/internal/logger" - + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" "github.com/OpenSlides/openslides-go/datastore" "github.com/OpenSlides/openslides-go/datastore/dsfetch" "github.com/OpenSlides/openslides-go/environment" @@ -108,6 +107,9 @@ func Cmd() *cobra.Command { rawFilter := cmd.Flags().String("filter-raw", "", "complex filter in JSON format with operators (=, !=, >, <, >=, <=, ~=)") exists := cmd.Flags().Bool("exists", false, "check only for existence (requires --filter or --filter-raw)") + // Filter and raw filter flags are mutually exclusive + cmd.MarkFlagsMutuallyExclusive("filter", "filter-raw") + cmd.RunE = func(cmd *cobra.Command, args []string) error { logger.Info("=== GET COLLECTION ===") @@ -119,55 +121,49 @@ func Cmd() *cobra.Command { return fmt.Errorf("--exists requires --filter or --filter-raw") } - if len(*filter) > 0 && *rawFilter != "" { - 
return fmt.Errorf("cannot use both --filter and --filter-raw") + // Build database config + dbConfig := &pb.DatabaseConfig{ + Host: *postgresHost, + Port: *postgresPort, + User: *postgresUser, + Database: *postgresDatabase, + PasswordFile: *postgresPasswordFile, } - // Parse raw filter if provided - var parsedRawFilter *RawFilter - if *rawFilter != "" { - parsedRawFilter = &RawFilter{} - if err := json.Unmarshal([]byte(*rawFilter), parsedRawFilter); err != nil { - return fmt.Errorf("parsing filter-raw: %w", err) - } + // Build query params + queryParams := &pb.QueryParams{ + Collection: collection, + Fields: *fields, + ExistsOnly: *exists, } - // Create environment map for datastore connection - envMap := map[string]string{ - constants.EnvDatabaseHost: *postgresHost, - constants.EnvDatabasePort: *postgresPort, - constants.EnvDatabaseUser: *postgresUser, - constants.EnvDatabaseName: *postgresDatabase, - constants.EnvDatabasePasswordFile: *postgresPasswordFile, - constants.EnvOpenSlidesDevelopment: constants.DevelopmentModeDisabled, + // Set filter (mutually exclusive) + if len(*filter) > 0 { + queryParams.SimpleFilter = *filter + } else if *rawFilter != "" { + queryParams.RawFilter = []byte(*rawFilter) } - // Initialize datastore flow - env := environment.ForTests(envMap) - dsFlow, err := datastore.NewFlowPostgres(env, nil) + // Execute query using exported function + result, err := ExecuteGetCollection(context.Background(), dbConfig, queryParams) if err != nil { - return fmt.Errorf("creating datastore flow: %w", err) + return fmt.Errorf("executing query: %w", err) } - logger.Info("Connected to database successfully") - - // Create fetcher - fetch := dsfetch.New(dsFlow) - ctx := context.Background() - - // Execute query - result, err := executeQuery(ctx, fetch, collection, *filter, parsedRawFilter, *fields, *exists) - if err != nil { - return fmt.Errorf("executing query: %w", err) + if !result.Success && result.Error != "" { + return fmt.Errorf("query failed: %s", 
result.Error) } - // Output as JSON - jsonBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return fmt.Errorf("marshaling result to JSON: %w", err) + // Print result + switch r := result.Result.(type) { + case *pb.GetCollectionResponse_Exists: + fmt.Printf("%v\n", r.Exists) + case *pb.GetCollectionResponse_JsonData: + fmt.Println(string(r.JsonData)) + default: + return fmt.Errorf("unexpected result type") } - fmt.Println(string(jsonBytes)) logger.Info("Query completed successfully") return nil } @@ -175,6 +171,98 @@ func Cmd() *cobra.Command { return cmd } +// ExecuteGetCollection executes a datastore query and returns the result. +func ExecuteGetCollection(ctx context.Context, dbConfig *pb.DatabaseConfig, params *pb.QueryParams) (*pb.GetCollectionResponse, error) { + logger.Debug("Executing get models query for collection: %s", params.Collection) + + // Validate required fields + if dbConfig == nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: "database config is required", + }, nil + } + if params == nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: "query params are required", + }, nil + } + + // Parse raw filter if provided + var parsedRawFilter *RawFilter + if len(params.RawFilter) > 0 { + parsedRawFilter = &RawFilter{} + if err := json.Unmarshal(params.RawFilter, parsedRawFilter); err != nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: fmt.Sprintf("parsing filter-raw: %v", err), + }, nil + } + } + + // Create environment map for datastore connection + envMap := map[string]string{ + constants.EnvDatabaseHost: dbConfig.Host, + constants.EnvDatabasePort: dbConfig.Port, + constants.EnvDatabaseUser: dbConfig.User, + constants.EnvDatabaseName: dbConfig.Database, + constants.EnvDatabasePasswordFile: dbConfig.PasswordFile, + constants.EnvOpenSlidesDevelopment: constants.DevelopmentModeDisabled, + } + + // Initialize datastore flow + env := environment.ForTests(envMap) + dsFlow, err := 
datastore.NewFlowPostgres(env) + if err != nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: fmt.Sprintf("creating datastore flow: %v", err), + }, nil + } + + logger.Info("Connected to database successfully") + + // Create fetcher + fetch := dsfetch.New(dsFlow) + + // Execute query + rawResult, err := executeQuery(ctx, fetch, params.Collection, params.SimpleFilter, parsedRawFilter, params.Fields, params.ExistsOnly) + if err != nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: fmt.Sprintf("executing query: %v", err), + }, nil + } + + // Format result based on query type + response := &pb.GetCollectionResponse{Success: true} + + if params.ExistsOnly { + // Result is a boolean + exists, ok := rawResult.(bool) + if !ok { + return &pb.GetCollectionResponse{ + Success: false, + Error: fmt.Sprintf("expected bool result for exists query, got %T", rawResult), + }, nil + } + response.Result = &pb.GetCollectionResponse_Exists{Exists: exists} + } else { + // Result is data - marshal to JSON bytes + jsonBytes, err := json.MarshalIndent(rawResult, "", " ") + if err != nil { + return &pb.GetCollectionResponse{ + Success: false, + Error: fmt.Sprintf("marshaling result to JSON: %v", err), + }, nil + } + response.Result = &pb.GetCollectionResponse_JsonData{JsonData: jsonBytes} + } + + return response, nil +} + func executeQuery(ctx context.Context, fetch *dsfetch.Fetch, collection string, filter map[string]string, rawFilter *RawFilter, fields []string, existsOnly bool) (any, error) { logger.Debug("Executing query for collection: %s", collection) @@ -537,13 +625,13 @@ func matchesCondition(record map[string]any, field, operator string, value any) case "!=": return !reflect.DeepEqual(recordValue, value) case ">": - return compareNumeric(recordValue, value, func(a, b float64) bool { return cmp.Compare(a, b) > 0 }) + return compareNumeric(recordValue, value, func(a, b float64) bool { return a > b }) case "<": - return compareNumeric(recordValue, 
value, func(a, b float64) bool { return cmp.Compare(a, b) < 0 }) + return compareNumeric(recordValue, value, func(a, b float64) bool { return a < b }) case ">=": - return compareNumeric(recordValue, value, func(a, b float64) bool { return cmp.Compare(a, b) >= 0 }) + return compareNumeric(recordValue, value, func(a, b float64) bool { return a >= b }) case "<=": - return compareNumeric(recordValue, value, func(a, b float64) bool { return cmp.Compare(a, b) <= 0 }) + return compareNumeric(recordValue, value, func(a, b float64) bool { return a <= b }) case "~=": return matchesRegex(recordValue, value) default: diff --git a/internal/manage/actions/migrations/migrations.go b/internal/manage/actions/migrations/migrations.go index 5487f9d..d4773ef 100644 --- a/internal/manage/actions/migrations/migrations.go +++ b/internal/manage/actions/migrations/migrations.go @@ -13,6 +13,7 @@ import ( "github.com/OpenSlides/openslides-cli/internal/logger" "github.com/OpenSlides/openslides-cli/internal/manage/client" "github.com/OpenSlides/openslides-cli/internal/utils" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" ) const ( @@ -133,19 +134,30 @@ func createMigrationCmd(name, description string, withProgressTracking bool) *co cl := client.New(*address, authPassword) - // Execute migration command - response, err := sendMigrationCommand(cl, name) + response, err := ExecuteMigrationCommand(cl, name) if err != nil { return fmt.Errorf("executing migration command: %w", err) } - // Handle response based on whether progress tracking is enabled - if withProgressTracking && progressInterval != nil && *progressInterval > 0 && response.Running() { - return trackMigrationProgress(cl, response, *progressInterval, name) + if withProgressTracking && progressInterval != nil && *progressInterval > 0 && (Running(response) || Finalizing(response)) { + fmt.Println("Progress:") + + var stopCondition func(*pb.MigrationsResponse) bool + if name == "finalize" { + stopCondition = func(r 
*pb.MigrationsResponse) bool { return !Running(r) && !Finalizing(r) } + } else { + stopCondition = func(r *pb.MigrationsResponse) bool { return !Running(r) } + } + + printCallback := func(update *pb.MigrationsProgressResponse) error { + fmt.Print(update.Output) + return nil + } + + return TrackMigrationProgress(cl, *progressInterval, stopCondition, printCallback) } - // No progress tracking - just print output - output, err := response.GetOutput(name) + output, err := GetOutput(response, name) if err != nil { return fmt.Errorf("formatting output: %w", err) } @@ -157,22 +169,29 @@ func createMigrationCmd(name, description string, withProgressTracking bool) *co return cmd } -// sendMigrationCommand sends a migration command with retry logic -func sendMigrationCommand(cl *client.Client, command string) (*MigrationResponse, error) { - logger.Debug("Sending migration command: %s", command) +// Internal type for HTTP response (stats is JSON object) +type migrationsHTTPResponse struct { + Success bool `json:"success"` + Status string `json:"status"` + Output string `json:"output"` + Exception string `json:"exception"` + Stats json.RawMessage `json:"stats"` +} + +// ExecuteMigrationCommand sends a migration command to the backend with retry logic. 
+func ExecuteMigrationCommand(cl *client.Client, command string) (*pb.MigrationsResponse, error) { + logger.Debug("Executing migration command: %s", command) ctx, cancel := context.WithTimeout(context.Background(), constants.MigrationTotalTimeout) defer cancel() var lastErr error - for attempt := 0; attempt < constants.MigrationMaxRetries; attempt++ { - // Check if context expired + for attempt := range constants.MigrationMaxRetries { if ctx.Err() != nil { return nil, fmt.Errorf("migration command timed out after %v: %w", constants.MigrationTotalTimeout, ctx.Err()) } - // Wait before retry (except first attempt) if attempt > 0 { logger.Warn("Retry attempt %d/%d after %v (previous error: %v)", attempt, constants.MigrationMaxRetries, constants.MigrationRetryDelay, lastErr) @@ -185,7 +204,6 @@ func sendMigrationCommand(cl *client.Client, command string) (*MigrationResponse } } - // Send request resp, err := cl.SendMigrations(command) if err != nil { lastErr = fmt.Errorf("sending request: %w", err) @@ -196,7 +214,6 @@ func sendMigrationCommand(cl *client.Client, command string) (*MigrationResponse return nil, lastErr } - // Check response body, err := client.CheckResponse(resp) if err != nil { lastErr = err @@ -207,50 +224,63 @@ func sendMigrationCommand(cl *client.Client, command string) (*MigrationResponse return nil, lastErr } - // Parse response - var migrationResp MigrationResponse - if err := json.Unmarshal(body, &migrationResp); err != nil { + var httpResp migrationsHTTPResponse + if err := json.Unmarshal(body, &httpResp); err != nil { logger.Error("Failed to unmarshal migration response: %v", err) return nil, fmt.Errorf("unmarshalling response: %w", err) } - logger.Debug("Migration response - Success: %v, Status: %s, Running: %v", - migrationResp.Success, migrationResp.Status, migrationResp.Running()) + migrationResp := &pb.MigrationsResponse{ + Success: httpResp.Success, + Status: httpResp.Status, + Output: httpResp.Output, + Exception: httpResp.Exception, + 
Stats: string(httpResp.Stats), + } + + logger.Debug("Migration response - Success: %v, Status: %s, Running: %v, Finalizing: %v", + migrationResp.Success, migrationResp.Status, Running(migrationResp), Finalizing(migrationResp)) - return &migrationResp, nil + return migrationResp, nil } return nil, fmt.Errorf("migration command failed after %d retries: %w", constants.MigrationMaxRetries, lastErr) } -// trackMigrationProgress polls migration progress until completion -func trackMigrationProgress(cl *client.Client, initialResponse *MigrationResponse, interval time.Duration, command string) error { - fmt.Println("Progress:") +// TrackMigrationProgress polls migration progress and sends updates to the callback. +func TrackMigrationProgress( + cl *client.Client, + interval time.Duration, + stopCondition func(*pb.MigrationsResponse) bool, + callback func(*pb.MigrationsProgressResponse) error, +) error { logger.Debug("Starting progress tracking with interval: %v", interval) for { time.Sleep(interval) - response, err := sendMigrationCommand(cl, "progress") + response, err := ExecuteMigrationCommand(cl, "progress") if err != nil { return fmt.Errorf("checking progress: %w", err) } - // Print progress output - output, err := response.GetOutput("progress") - if err != nil { - return fmt.Errorf("formatting progress output: %w", err) + update := &pb.MigrationsProgressResponse{ + Output: response.Output, + Running: Running(response) || Finalizing(response), + Success: response.Success, + Exception: response.Exception, + } + + if err := callback(update); err != nil { + return fmt.Errorf("progress callback error: %w", err) } - fmt.Print(output) - // Check if migration failed - if response.Faulty() { + if Faulty(response) { logger.Error("Migration command failed") return fmt.Errorf("migration failed: %s", response.Exception) } - // Check if migration completed - if !response.Running() { + if stopCondition(response) { logger.Info("Migration completed") break } @@ -259,6 +289,61 @@ func 
trackMigrationProgress(cl *client.Client, initialResponse *MigrationRespons return nil } +// GetOutput returns the formatted output for the migration response +func GetOutput(mr *pb.MigrationsResponse, command string) (string, error) { + if Faulty(mr) { + return formatAll(mr) + } + if command == "stats" { + return FormatStats(mr.Stats) + } + return mr.Output, nil +} + +// FormatStats formats the stats bytes into a readable string (exported for gRPC use) +func FormatStats(stats string) (string, error) { + if stats == "" { + return "", nil + } + + var statsMap map[string]any + if err := json.Unmarshal([]byte(stats), &statsMap); err != nil { + return "", fmt.Errorf("unmarshalling stats: %w", err) + } + + var sb strings.Builder + for _, field := range constants.MigrationStatsFields { + if value, ok := statsMap[field]; ok { + fmt.Fprintf(&sb, "%s: %v\n", field, value) + } + } + + return sb.String(), nil +} + +// formatAll formats all response fields +func formatAll(mr *pb.MigrationsResponse) (string, error) { + return fmt.Sprintf("Success: %v\nStatus: %s\nOutput: %s\nException: %s\n", + mr.Success, mr.Status, mr.Output, mr.Exception), nil +} + +// Faulty returns true if the migration failed +func Faulty(mr *pb.MigrationsResponse) bool { + return !mr.Success || mr.Exception != "" || + mr.Status == constants.MigrationStatusFailed || + mr.Status == constants.FinalizationStatusFailed +} + +// Running returns true if the migration is currently in progress +func Running(mr *pb.MigrationsResponse) bool { + return mr.Status == constants.MigrationStatusRunning +} + +// Finalizing returns true if the migration finalization is currently in progress +func Finalizing(mr *pb.MigrationsResponse) bool { + return mr.Status == constants.FinalizationStatusRunning +} + // isRetryableError determines if an error should trigger a retry func isRetryableError(err error) bool { if err == nil { @@ -296,56 +381,3 @@ func isRetryableError(err error) bool { return false } - -// MigrationResponse 
represents the response from a migration command -type MigrationResponse struct { - Success bool `json:"success"` - Status string `json:"status"` - Output string `json:"output"` - Exception string `json:"exception"` - Stats json.RawMessage `json:"stats"` -} - -// GetOutput returns the formatted output for the migration response -func (mr *MigrationResponse) GetOutput(command string) (string, error) { - if mr.Faulty() { - return mr.formatAll() - } - if command == "stats" { - return mr.formatStats() - } - return mr.Output, nil -} - -// formatStats formats the stats JSON into a readable string -func (mr *MigrationResponse) formatStats() (string, error) { - var stats map[string]any - if err := json.Unmarshal(mr.Stats, &stats); err != nil { - return "", fmt.Errorf("unmarshalling stats: %w", err) - } - - var sb strings.Builder - for _, field := range constants.MigrationStatsFields { - if value, ok := stats[field]; ok { - fmt.Fprintf(&sb, "%s: %v\n", field, value) - } - } - - return sb.String(), nil -} - -// formatAll formats all response fields -func (mr *MigrationResponse) formatAll() (string, error) { - return fmt.Sprintf("Success: %v\nStatus: %s\nOutput: %s\nException: %s\n", - mr.Success, mr.Status, mr.Output, mr.Exception), nil -} - -// Faulty returns true if the migration failed -func (mr *MigrationResponse) Faulty() bool { - return !mr.Success || mr.Exception != "" -} - -// Running returns true if the migration is currently in progress -func (mr *MigrationResponse) Running() bool { - return mr.Status == constants.MigrationStatusRunning -} diff --git a/internal/manage/actions/migrations/migrations_test.go b/internal/manage/actions/migrations/migrations_test.go index 346822d..9d4298d 100644 --- a/internal/manage/actions/migrations/migrations_test.go +++ b/internal/manage/actions/migrations/migrations_test.go @@ -6,40 +6,41 @@ import ( "testing" "github.com/OpenSlides/openslides-cli/internal/constants" + pb "github.com/OpenSlides/openslides-cli/proto/osmanage" ) 
-func TestMigrationResponse_Faulty(t *testing.T) { +func TestFaulty(t *testing.T) { tests := []struct { name string - resp MigrationResponse + resp *pb.MigrationsResponse wantFault bool }{ { "success no exception", - MigrationResponse{Success: true, Exception: ""}, + &pb.MigrationsResponse{Success: true, Exception: ""}, false, }, { "failure", - MigrationResponse{Success: false, Exception: ""}, + &pb.MigrationsResponse{Success: false, Exception: ""}, true, }, { "success with exception", - MigrationResponse{Success: true, Exception: "error occurred"}, + &pb.MigrationsResponse{Success: true, Exception: "error occurred"}, true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := tt.resp.Faulty(); got != tt.wantFault { + if got := Faulty(tt.resp); got != tt.wantFault { t.Errorf("Faulty() = %v, want %v", got, tt.wantFault) } }) } } -func TestMigrationResponse_Running(t *testing.T) { +func TestRunning(t *testing.T) { tests := []struct { name string status string @@ -52,21 +53,21 @@ func TestMigrationResponse_Running(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp := MigrationResponse{Status: tt.status} - if got := resp.Running(); got != tt.running { + resp := &pb.MigrationsResponse{Status: tt.status} + if got := Running(resp); got != tt.running { t.Errorf("Running() = %v, want %v", got, tt.running) } }) } } -func TestMigrationResponse_GetOutput(t *testing.T) { +func TestGetOutput(t *testing.T) { t.Run("normal output", func(t *testing.T) { - resp := MigrationResponse{ + resp := &pb.MigrationsResponse{ Success: true, Output: "Migration completed", } - output, err := resp.GetOutput("migrate") + output, err := GetOutput(resp, "migrate") if err != nil { t.Errorf("GetOutput() error = %v", err) } @@ -86,17 +87,16 @@ func TestMigrationResponse_GetOutput(t *testing.T) { "status": "finalization_required", } statsJSON, _ := json.Marshal(stats) - resp := MigrationResponse{ + resp := &pb.MigrationsResponse{ Success: 
true, - Stats: statsJSON, + Stats: string(statsJSON), } - output, err := resp.GetOutput("stats") + output, err := GetOutput(resp, "stats") if err != nil { t.Errorf("GetOutput() error = %v", err) } // Verify all expected fields are present - // Using subset of MigrationStatsFields for validation expectedFields := []string{ "current_migration_index", "target_migration_index", @@ -112,11 +112,11 @@ func TestMigrationResponse_GetOutput(t *testing.T) { }) t.Run("faulty response", func(t *testing.T) { - resp := MigrationResponse{ + resp := &pb.MigrationsResponse{ Success: false, Exception: "Migration failed", } - output, err := resp.GetOutput("migrate") + output, err := GetOutput(resp, "migrate") if err != nil { t.Errorf("GetOutput() error = %v", err) } @@ -126,7 +126,7 @@ func TestMigrationResponse_GetOutput(t *testing.T) { }) } -func TestMigrationResponse_FormatStats(t *testing.T) { +func TestFormatStats(t *testing.T) { t.Run("ordered output", func(t *testing.T) { stats := map[string]any{ "status": "finalization_required", @@ -138,11 +138,10 @@ func TestMigrationResponse_FormatStats(t *testing.T) { "fully_migrated_positions": 0, } statsJSON, _ := json.Marshal(stats) - resp := &MigrationResponse{Stats: statsJSON} - output, err := resp.formatStats() + output, err := FormatStats(string(statsJSON)) if err != nil { - t.Errorf("formatStats() error = %v", err) + t.Errorf("FormatStats() error = %v", err) } // Verify order: current_migration_index should come before status @@ -176,11 +175,10 @@ func TestMigrationResponse_FormatStats(t *testing.T) { "current_migration_index": 70, } statsJSON, _ := json.Marshal(stats) - resp := &MigrationResponse{Stats: statsJSON} - output, err := resp.formatStats() + output, err := FormatStats(string(statsJSON)) if err != nil { - t.Errorf("formatStats() error = %v", err) + t.Errorf("FormatStats() error = %v", err) } // Should still include present fields @@ -193,9 +191,9 @@ func TestMigrationResponse_FormatStats(t *testing.T) { }) t.Run("invalid 
JSON", func(t *testing.T) { - resp := &MigrationResponse{Stats: json.RawMessage("invalid json")} + invalidJSON := "invalid json" - _, err := resp.formatStats() + _, err := FormatStats(invalidJSON) if err == nil { t.Error("Expected error for invalid JSON") } diff --git a/proto/osmanage.proto b/proto/osmanage.proto new file mode 100644 index 0000000..4da04de --- /dev/null +++ b/proto/osmanage.proto @@ -0,0 +1,304 @@ +syntax = "proto3"; + +package osmanage; + +option go_package = "github.com/OpenSlides/openslides-cli/proto/osmanage"; + +service OsmanageService { + // Instance filesystem operations + rpc SetupInstance(InstanceConfigRequest) returns (InstanceConfigResponse); + rpc ConfigInstance(InstanceConfigRequest) returns (InstanceConfigResponse); + rpc CreateInstance(CreateInstanceRequest) returns (CreateInstanceResponse); + rpc RemoveInstance(RemoveInstanceRequest) returns (RemoveInstanceResponse); + + // k8s actions + rpc GetNamespaceExists(GetNamespaceExistsRequest) returns (GetNamespaceExistsResponse); + rpc GetClusterStatus(GetClusterStatusRequest) returns (GetClusterStatusResponse); + rpc GetServiceAddress(GetServiceAddressRequest) returns (GetServiceAddressResponse); + rpc GetInstanceHealth(GetInstanceHealthRequest) returns (stream GetInstanceHealthResponse); + rpc GetInstanceStatus(GetInstanceStatusRequest) returns (GetInstanceStatusResponse); + rpc UpdateBackendmanage(UpdateBackendmanageRequest) returns (stream UpdateBackendmanageResponse); + rpc UpdateInstance(UpdateInstanceRequest) returns (stream UpdateInstanceResponse); + rpc ScaleService(ScaleServiceRequest) returns (stream ScaleServiceResponse); + rpc StartInstance(StartInstanceRequest) returns (stream StartInstanceResponse); + rpc StopInstance(StopInstanceRequest) returns (stream StopInstanceResponse); + + // datastore query action (via openslides-go) + rpc GetCollection(GetCollectionRequest) returns (GetCollectionResponse); + + // migrations actions + + // server side streaming + rpc 
MigrationsMigrate(MigrationsRequest) returns (stream MigrationsProgressResponse); + rpc MigrationsFinalize(MigrationsRequest) returns (stream MigrationsProgressResponse); + + // unary gRPC calls + rpc MigrationsReset(MigrationsRequest) returns (MigrationsResponse); + rpc MigrationsClearCollectionfieldTables(MigrationsRequest) returns (MigrationsResponse); + rpc MigrationsStats(MigrationsRequest) returns (MigrationsResponse); + rpc MigrationsProgress(MigrationsRequest) returns (MigrationsResponse); + + // generic manage action via backendmanage client + rpc SendManageAction(SendManageActionRequest) returns (SendManageActionResponse); +} + +message InstanceConfigRequest { + string instance_dir = 1; + string stack_template_path = 2; // path to template file or directory on server node + repeated bytes configs = 3; // JSON-encoded YAML configs (merged last wins on conflict) + bool force = 4; + bool clean = 5; +} + +message InstanceConfigResponse { + bool success = 1; + string error = 2; +} + +message CreateInstanceRequest { + string instance_dir = 1; + string db_password = 2; + string superadmin_password = 3; +} + +message CreateInstanceResponse { + bool success = 1; + string error = 2; +} + +message RemoveInstanceRequest { + string instance_dir = 1; + bool force = 2; +} + +message RemoveInstanceResponse { + bool success = 1; + string error = 2; +} + +message GetNamespaceExistsRequest { + string instance_url = 1; + string kubeconfig = 2; +} + +message GetNamespaceExistsResponse { + bool exists = 1; + string error = 2; +} + +message GetClusterStatusRequest { + string kubeconfig = 1; // Empty = use default +} + +message GetClusterStatusResponse { + string status = 1; + int32 total_nodes = 2; + int32 ready_nodes = 3; +} + +message GetServiceAddressRequest { + string instance_url = 1; + string service_name = 2; + string kubeconfig = 3; // Empty = use default +} + +message GetServiceAddressResponse { + string address = 1; // Format: "IP:Port" + string error = 2; // Empty on 
success +} + +message GetInstanceHealthRequest { + string instance_url = 1; + string kubeconfig = 2; + bool wait = 3; + int32 timeout_seconds = 4; +} + +message GetInstanceHealthResponse { + bool healthy = 1; + int32 ready_pods = 2; + int32 total_pods = 3; + int32 active_pods = 4; + repeated PodStatus pods = 5; + bool complete = 6; // True on final message (healthy or timeout) + string error = 7; // Error message if failed +} + +message PodStatus { + string name = 1; + string phase = 2; // Running, Pending, etc. + bool ready = 3; +} + +message ContainerStatus { + string name = 1; + string tag = 2; + string container_registry = 3; + bool ready = 4; + bool started = 5; + map env_vars = 6; +} + +message InstancePodStatus { + string name = 1; + string service = 2; + string node = 3; + repeated ContainerStatus containers = 4; +} + +message GetInstanceStatusRequest { + string instance_url = 1; + string kubeconfig = 2; +} + +message GetInstanceStatusResponse { + repeated InstancePodStatus pods = 1; + map service_counts = 2; + bool namespace_exists = 3; +} + +message UpdateBackendmanageRequest { + string instance_url = 1; + string kubeconfig = 2; + string tag = 3; + string container_registry = 4; + int32 timeout_seconds = 5; +} + +message UpdateBackendmanageResponse { + bool complete = 1; + string error = 2; + // Deployment rollout progress + int32 ready_replicas = 3; + int32 desired_replicas = 4; +} + +message UpdateInstanceRequest { + string instance_dir = 1; + string kubeconfig = 2; + bool skip_ready_check = 3; + int32 timeout_seconds = 4; +} + +message UpdateInstanceResponse { + bool healthy = 1; + int32 ready_pods = 2; + int32 total_pods = 3; + int32 active_pods = 4; + repeated PodStatus pods = 5; + bool complete = 6; + string error = 7; + bool inactive = 8; +} + +message ScaleServiceRequest { + string instance_dir = 1; + string service = 2; + string kubeconfig = 3; + bool skip_ready_check = 4; + int32 timeout_seconds = 5; +} + +message ScaleServiceResponse { + bool 
complete = 1; + string error = 2; + // Deployment rollout progress + int32 ready_replicas = 3; + int32 desired_replicas = 4; +} + +message StartInstanceRequest { + string instance_dir = 1; + string kubeconfig = 2; + bool skip_ready_check = 3; + int32 timeout_seconds = 4; + map labels = 5; +} + +message StartInstanceResponse { + bool healthy = 1; + int32 ready_pods = 2; + int32 total_pods = 3; + int32 active_pods = 4; + repeated PodStatus pods = 5; + bool complete = 6; + string error = 7; +} + +message StopInstanceRequest { + string instance_dir = 1; + string kubeconfig = 2; + int32 timeout_seconds = 3; +} + +message StopInstanceResponse { + bool complete = 1; + string error = 2; + int32 elapsed_seconds = 3; +} + +// PostgreSQL database connection configuration for datastore access +message DatabaseConfig { + string host = 1; + string port = 2; + string user = 3; + string database = 4; + string password_file = 5; +} + +// Query parameters for filtering and selecting datastore models +message QueryParams { + string collection = 1; // Collection to query: "user", "meeting", or "organization" + repeated string fields = 2; // Fields to return (empty = all fields) + bool exists_only = 3; // Return only existence check (bool) instead of data + map simple_filter = 4; // Simple key=value filters (AND'd together) + bytes raw_filter = 5; // Complex JSON filter (mutually exclusive with simple_filter) +} + +message GetCollectionRequest { + DatabaseConfig db_config = 1; // Database connection configuration + QueryParams query_params = 2; // Query filtering and field selection +} + +message GetCollectionResponse { + bool success = 1; + string error = 2; // Empty on success + + oneof result { + bool exists = 3; // For exists_only queries + bytes json_data = 4; // JSON-encoded model data + } +} + +message MigrationsRequest { + string address_backendmanage = 1; + string password_file_path = 2; +} + +message MigrationsResponse { + bool success = 1; + string status = 2; + string 
output = 3; + string exception = 4; + string stats = 5; +} + +message MigrationsProgressResponse { + string output = 1; + bool running = 2; + bool success = 3; + string exception = 4; +} + +message SendManageActionRequest { + string address_backendmanage = 1; + string password_file_path = 2; + string action = 3; + bytes payload = 4; +} + +message SendManageActionResponse { + bool success = 1; + string error = 2; + bytes body = 3; +} \ No newline at end of file diff --git a/proto/osmanage/osmanage.pb.go b/proto/osmanage/osmanage.pb.go new file mode 100644 index 0000000..eb442bd --- /dev/null +++ b/proto/osmanage/osmanage.pb.go @@ -0,0 +1,2950 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc v4.25.1 +// source: proto/osmanage.proto + +package osmanage + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type InstanceConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + StackTemplatePath string `protobuf:"bytes,2,opt,name=stack_template_path,json=stackTemplatePath,proto3" json:"stack_template_path,omitempty"` // path to template file or directory on server node + Configs [][]byte `protobuf:"bytes,3,rep,name=configs,proto3" json:"configs,omitempty"` // JSON-encoded YAML configs (merged last wins on conflict) + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + Clean bool `protobuf:"varint,5,opt,name=clean,proto3" json:"clean,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InstanceConfigRequest) Reset() { + *x = InstanceConfigRequest{} + mi := &file_proto_osmanage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InstanceConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstanceConfigRequest) ProtoMessage() {} + +func (x *InstanceConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstanceConfigRequest.ProtoReflect.Descriptor instead. 
+func (*InstanceConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{0} +} + +func (x *InstanceConfigRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *InstanceConfigRequest) GetStackTemplatePath() string { + if x != nil { + return x.StackTemplatePath + } + return "" +} + +func (x *InstanceConfigRequest) GetConfigs() [][]byte { + if x != nil { + return x.Configs + } + return nil +} + +func (x *InstanceConfigRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *InstanceConfigRequest) GetClean() bool { + if x != nil { + return x.Clean + } + return false +} + +type InstanceConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InstanceConfigResponse) Reset() { + *x = InstanceConfigResponse{} + mi := &file_proto_osmanage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InstanceConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstanceConfigResponse) ProtoMessage() {} + +func (x *InstanceConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstanceConfigResponse.ProtoReflect.Descriptor instead. 
+func (*InstanceConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{1} +} + +func (x *InstanceConfigResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *InstanceConfigResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type CreateInstanceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + DbPassword string `protobuf:"bytes,2,opt,name=db_password,json=dbPassword,proto3" json:"db_password,omitempty"` + SuperadminPassword string `protobuf:"bytes,3,opt,name=superadmin_password,json=superadminPassword,proto3" json:"superadmin_password,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateInstanceRequest) Reset() { + *x = CreateInstanceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateInstanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateInstanceRequest) ProtoMessage() {} + +func (x *CreateInstanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateInstanceRequest.ProtoReflect.Descriptor instead. 
+func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateInstanceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *CreateInstanceRequest) GetDbPassword() string { + if x != nil { + return x.DbPassword + } + return "" +} + +func (x *CreateInstanceRequest) GetSuperadminPassword() string { + if x != nil { + return x.SuperadminPassword + } + return "" +} + +type CreateInstanceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateInstanceResponse) Reset() { + *x = CreateInstanceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateInstanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateInstanceResponse) ProtoMessage() {} + +func (x *CreateInstanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateInstanceResponse.ProtoReflect.Descriptor instead. 
+func (*CreateInstanceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateInstanceResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *CreateInstanceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type RemoveInstanceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveInstanceRequest) Reset() { + *x = RemoveInstanceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveInstanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveInstanceRequest) ProtoMessage() {} + +func (x *RemoveInstanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveInstanceRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveInstanceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{4} +} + +func (x *RemoveInstanceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *RemoveInstanceRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type RemoveInstanceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveInstanceResponse) Reset() { + *x = RemoveInstanceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveInstanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveInstanceResponse) ProtoMessage() {} + +func (x *RemoveInstanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveInstanceResponse.ProtoReflect.Descriptor instead. 
+func (*RemoveInstanceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{5} +} + +func (x *RemoveInstanceResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *RemoveInstanceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type GetNamespaceExistsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceUrl string `protobuf:"bytes,1,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetNamespaceExistsRequest) Reset() { + *x = GetNamespaceExistsRequest{} + mi := &file_proto_osmanage_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetNamespaceExistsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNamespaceExistsRequest) ProtoMessage() {} + +func (x *GetNamespaceExistsRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespaceExistsRequest.ProtoReflect.Descriptor instead. 
+func (*GetNamespaceExistsRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{6} +} + +func (x *GetNamespaceExistsRequest) GetInstanceUrl() string { + if x != nil { + return x.InstanceUrl + } + return "" +} + +func (x *GetNamespaceExistsRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +type GetNamespaceExistsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetNamespaceExistsResponse) Reset() { + *x = GetNamespaceExistsResponse{} + mi := &file_proto_osmanage_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetNamespaceExistsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNamespaceExistsResponse) ProtoMessage() {} + +func (x *GetNamespaceExistsResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespaceExistsResponse.ProtoReflect.Descriptor instead. 
+func (*GetNamespaceExistsResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{7} +} + +func (x *GetNamespaceExistsResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *GetNamespaceExistsResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type GetClusterStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Kubeconfig string `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` // Empty = use default + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetClusterStatusRequest) Reset() { + *x = GetClusterStatusRequest{} + mi := &file_proto_osmanage_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetClusterStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClusterStatusRequest) ProtoMessage() {} + +func (x *GetClusterStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClusterStatusRequest.ProtoReflect.Descriptor instead. 
+func (*GetClusterStatusRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{8} +} + +func (x *GetClusterStatusRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +type GetClusterStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + TotalNodes int32 `protobuf:"varint,2,opt,name=total_nodes,json=totalNodes,proto3" json:"total_nodes,omitempty"` + ReadyNodes int32 `protobuf:"varint,3,opt,name=ready_nodes,json=readyNodes,proto3" json:"ready_nodes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetClusterStatusResponse) Reset() { + *x = GetClusterStatusResponse{} + mi := &file_proto_osmanage_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetClusterStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClusterStatusResponse) ProtoMessage() {} + +func (x *GetClusterStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClusterStatusResponse.ProtoReflect.Descriptor instead. 
+func (*GetClusterStatusResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{9} +} + +func (x *GetClusterStatusResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *GetClusterStatusResponse) GetTotalNodes() int32 { + if x != nil { + return x.TotalNodes + } + return 0 +} + +func (x *GetClusterStatusResponse) GetReadyNodes() int32 { + if x != nil { + return x.ReadyNodes + } + return 0 +} + +type GetServiceAddressRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceUrl string `protobuf:"bytes,1,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + Kubeconfig string `protobuf:"bytes,3,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` // Empty = use default + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetServiceAddressRequest) Reset() { + *x = GetServiceAddressRequest{} + mi := &file_proto_osmanage_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetServiceAddressRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceAddressRequest) ProtoMessage() {} + +func (x *GetServiceAddressRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceAddressRequest.ProtoReflect.Descriptor instead. 
+func (*GetServiceAddressRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{10} +} + +func (x *GetServiceAddressRequest) GetInstanceUrl() string { + if x != nil { + return x.InstanceUrl + } + return "" +} + +func (x *GetServiceAddressRequest) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *GetServiceAddressRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +type GetServiceAddressResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Format: "IP:Port" + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` // Empty on success + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetServiceAddressResponse) Reset() { + *x = GetServiceAddressResponse{} + mi := &file_proto_osmanage_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetServiceAddressResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceAddressResponse) ProtoMessage() {} + +func (x *GetServiceAddressResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceAddressResponse.ProtoReflect.Descriptor instead. 
+func (*GetServiceAddressResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{11} +} + +func (x *GetServiceAddressResponse) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *GetServiceAddressResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type GetInstanceHealthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceUrl string `protobuf:"bytes,1,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + Wait bool `protobuf:"varint,3,opt,name=wait,proto3" json:"wait,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,4,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInstanceHealthRequest) Reset() { + *x = GetInstanceHealthRequest{} + mi := &file_proto_osmanage_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInstanceHealthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInstanceHealthRequest) ProtoMessage() {} + +func (x *GetInstanceHealthRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInstanceHealthRequest.ProtoReflect.Descriptor instead. 
+func (*GetInstanceHealthRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{12} +} + +func (x *GetInstanceHealthRequest) GetInstanceUrl() string { + if x != nil { + return x.InstanceUrl + } + return "" +} + +func (x *GetInstanceHealthRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *GetInstanceHealthRequest) GetWait() bool { + if x != nil { + return x.Wait + } + return false +} + +func (x *GetInstanceHealthRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +type GetInstanceHealthResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Healthy bool `protobuf:"varint,1,opt,name=healthy,proto3" json:"healthy,omitempty"` + ReadyPods int32 `protobuf:"varint,2,opt,name=ready_pods,json=readyPods,proto3" json:"ready_pods,omitempty"` + TotalPods int32 `protobuf:"varint,3,opt,name=total_pods,json=totalPods,proto3" json:"total_pods,omitempty"` + ActivePods int32 `protobuf:"varint,4,opt,name=active_pods,json=activePods,proto3" json:"active_pods,omitempty"` + Pods []*PodStatus `protobuf:"bytes,5,rep,name=pods,proto3" json:"pods,omitempty"` + Complete bool `protobuf:"varint,6,opt,name=complete,proto3" json:"complete,omitempty"` // True on final message (healthy or timeout) + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` // Error message if failed + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInstanceHealthResponse) Reset() { + *x = GetInstanceHealthResponse{} + mi := &file_proto_osmanage_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInstanceHealthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInstanceHealthResponse) ProtoMessage() {} + +func (x *GetInstanceHealthResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_proto_osmanage_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInstanceHealthResponse.ProtoReflect.Descriptor instead. +func (*GetInstanceHealthResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{13} +} + +func (x *GetInstanceHealthResponse) GetHealthy() bool { + if x != nil { + return x.Healthy + } + return false +} + +func (x *GetInstanceHealthResponse) GetReadyPods() int32 { + if x != nil { + return x.ReadyPods + } + return 0 +} + +func (x *GetInstanceHealthResponse) GetTotalPods() int32 { + if x != nil { + return x.TotalPods + } + return 0 +} + +func (x *GetInstanceHealthResponse) GetActivePods() int32 { + if x != nil { + return x.ActivePods + } + return 0 +} + +func (x *GetInstanceHealthResponse) GetPods() []*PodStatus { + if x != nil { + return x.Pods + } + return nil +} + +func (x *GetInstanceHealthResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *GetInstanceHealthResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type PodStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Phase string `protobuf:"bytes,2,opt,name=phase,proto3" json:"phase,omitempty"` // Running, Pending, etc. 
+ Ready bool `protobuf:"varint,3,opt,name=ready,proto3" json:"ready,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PodStatus) Reset() { + *x = PodStatus{} + mi := &file_proto_osmanage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PodStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PodStatus) ProtoMessage() {} + +func (x *PodStatus) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PodStatus.ProtoReflect.Descriptor instead. +func (*PodStatus) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{14} +} + +func (x *PodStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PodStatus) GetPhase() string { + if x != nil { + return x.Phase + } + return "" +} + +func (x *PodStatus) GetReady() bool { + if x != nil { + return x.Ready + } + return false +} + +type ContainerStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + ContainerRegistry string `protobuf:"bytes,3,opt,name=container_registry,json=containerRegistry,proto3" json:"container_registry,omitempty"` + Ready bool `protobuf:"varint,4,opt,name=ready,proto3" json:"ready,omitempty"` + Started bool `protobuf:"varint,5,opt,name=started,proto3" json:"started,omitempty"` + EnvVars map[string]string `protobuf:"bytes,6,rep,name=env_vars,json=envVars,proto3" json:"env_vars,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + 
sizeCache protoimpl.SizeCache +} + +func (x *ContainerStatus) Reset() { + *x = ContainerStatus{} + mi := &file_proto_osmanage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ContainerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerStatus) ProtoMessage() {} + +func (x *ContainerStatus) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerStatus.ProtoReflect.Descriptor instead. +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{15} +} + +func (x *ContainerStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ContainerStatus) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ContainerStatus) GetContainerRegistry() string { + if x != nil { + return x.ContainerRegistry + } + return "" +} + +func (x *ContainerStatus) GetReady() bool { + if x != nil { + return x.Ready + } + return false +} + +func (x *ContainerStatus) GetStarted() bool { + if x != nil { + return x.Started + } + return false +} + +func (x *ContainerStatus) GetEnvVars() map[string]string { + if x != nil { + return x.EnvVars + } + return nil +} + +type InstancePodStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Node string `protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` + Containers []*ContainerStatus `protobuf:"bytes,4,rep,name=containers,proto3" json:"containers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache +} + +func (x *InstancePodStatus) Reset() { + *x = InstancePodStatus{} + mi := &file_proto_osmanage_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InstancePodStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstancePodStatus) ProtoMessage() {} + +func (x *InstancePodStatus) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstancePodStatus.ProtoReflect.Descriptor instead. +func (*InstancePodStatus) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{16} +} + +func (x *InstancePodStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InstancePodStatus) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *InstancePodStatus) GetNode() string { + if x != nil { + return x.Node + } + return "" +} + +func (x *InstancePodStatus) GetContainers() []*ContainerStatus { + if x != nil { + return x.Containers + } + return nil +} + +type GetInstanceStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceUrl string `protobuf:"bytes,1,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInstanceStatusRequest) Reset() { + *x = GetInstanceStatusRequest{} + mi := &file_proto_osmanage_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInstanceStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*GetInstanceStatusRequest) ProtoMessage() {} + +func (x *GetInstanceStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInstanceStatusRequest.ProtoReflect.Descriptor instead. +func (*GetInstanceStatusRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{17} +} + +func (x *GetInstanceStatusRequest) GetInstanceUrl() string { + if x != nil { + return x.InstanceUrl + } + return "" +} + +func (x *GetInstanceStatusRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +type GetInstanceStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Pods []*InstancePodStatus `protobuf:"bytes,1,rep,name=pods,proto3" json:"pods,omitempty"` + ServiceCounts map[string]int32 `protobuf:"bytes,2,rep,name=service_counts,json=serviceCounts,proto3" json:"service_counts,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + NamespaceExists bool `protobuf:"varint,3,opt,name=namespace_exists,json=namespaceExists,proto3" json:"namespace_exists,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetInstanceStatusResponse) Reset() { + *x = GetInstanceStatusResponse{} + mi := &file_proto_osmanage_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetInstanceStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInstanceStatusResponse) ProtoMessage() {} + +func (x *GetInstanceStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInstanceStatusResponse.ProtoReflect.Descriptor instead. +func (*GetInstanceStatusResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{18} +} + +func (x *GetInstanceStatusResponse) GetPods() []*InstancePodStatus { + if x != nil { + return x.Pods + } + return nil +} + +func (x *GetInstanceStatusResponse) GetServiceCounts() map[string]int32 { + if x != nil { + return x.ServiceCounts + } + return nil +} + +func (x *GetInstanceStatusResponse) GetNamespaceExists() bool { + if x != nil { + return x.NamespaceExists + } + return false +} + +type UpdateBackendmanageRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceUrl string `protobuf:"bytes,1,opt,name=instance_url,json=instanceUrl,proto3" json:"instance_url,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + Tag string `protobuf:"bytes,3,opt,name=tag,proto3" json:"tag,omitempty"` + ContainerRegistry string `protobuf:"bytes,4,opt,name=container_registry,json=containerRegistry,proto3" json:"container_registry,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,5,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateBackendmanageRequest) Reset() { + *x = UpdateBackendmanageRequest{} + mi := &file_proto_osmanage_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateBackendmanageRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBackendmanageRequest) ProtoMessage() {} + +func (x *UpdateBackendmanageRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[19] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBackendmanageRequest.ProtoReflect.Descriptor instead. +func (*UpdateBackendmanageRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{19} +} + +func (x *UpdateBackendmanageRequest) GetInstanceUrl() string { + if x != nil { + return x.InstanceUrl + } + return "" +} + +func (x *UpdateBackendmanageRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *UpdateBackendmanageRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *UpdateBackendmanageRequest) GetContainerRegistry() string { + if x != nil { + return x.ContainerRegistry + } + return "" +} + +func (x *UpdateBackendmanageRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +type UpdateBackendmanageResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Complete bool `protobuf:"varint,1,opt,name=complete,proto3" json:"complete,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + // Deployment rollout progress + ReadyReplicas int32 `protobuf:"varint,3,opt,name=ready_replicas,json=readyReplicas,proto3" json:"ready_replicas,omitempty"` + DesiredReplicas int32 `protobuf:"varint,4,opt,name=desired_replicas,json=desiredReplicas,proto3" json:"desired_replicas,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateBackendmanageResponse) Reset() { + *x = UpdateBackendmanageResponse{} + mi := &file_proto_osmanage_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateBackendmanageResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBackendmanageResponse) 
ProtoMessage() {} + +func (x *UpdateBackendmanageResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBackendmanageResponse.ProtoReflect.Descriptor instead. +func (*UpdateBackendmanageResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{20} +} + +func (x *UpdateBackendmanageResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *UpdateBackendmanageResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *UpdateBackendmanageResponse) GetReadyReplicas() int32 { + if x != nil { + return x.ReadyReplicas + } + return 0 +} + +func (x *UpdateBackendmanageResponse) GetDesiredReplicas() int32 { + if x != nil { + return x.DesiredReplicas + } + return 0 +} + +type UpdateInstanceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + SkipReadyCheck bool `protobuf:"varint,3,opt,name=skip_ready_check,json=skipReadyCheck,proto3" json:"skip_ready_check,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,4,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateInstanceRequest) Reset() { + *x = UpdateInstanceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateInstanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*UpdateInstanceRequest) ProtoMessage() {} + +func (x *UpdateInstanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateInstanceRequest.ProtoReflect.Descriptor instead. +func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{21} +} + +func (x *UpdateInstanceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *UpdateInstanceRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *UpdateInstanceRequest) GetSkipReadyCheck() bool { + if x != nil { + return x.SkipReadyCheck + } + return false +} + +func (x *UpdateInstanceRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +type UpdateInstanceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Healthy bool `protobuf:"varint,1,opt,name=healthy,proto3" json:"healthy,omitempty"` + ReadyPods int32 `protobuf:"varint,2,opt,name=ready_pods,json=readyPods,proto3" json:"ready_pods,omitempty"` + TotalPods int32 `protobuf:"varint,3,opt,name=total_pods,json=totalPods,proto3" json:"total_pods,omitempty"` + ActivePods int32 `protobuf:"varint,4,opt,name=active_pods,json=activePods,proto3" json:"active_pods,omitempty"` + Pods []*PodStatus `protobuf:"bytes,5,rep,name=pods,proto3" json:"pods,omitempty"` + Complete bool `protobuf:"varint,6,opt,name=complete,proto3" json:"complete,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` + Inactive bool `protobuf:"varint,8,opt,name=inactive,proto3" json:"inactive,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateInstanceResponse) 
Reset() { + *x = UpdateInstanceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateInstanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateInstanceResponse) ProtoMessage() {} + +func (x *UpdateInstanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateInstanceResponse.ProtoReflect.Descriptor instead. +func (*UpdateInstanceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{22} +} + +func (x *UpdateInstanceResponse) GetHealthy() bool { + if x != nil { + return x.Healthy + } + return false +} + +func (x *UpdateInstanceResponse) GetReadyPods() int32 { + if x != nil { + return x.ReadyPods + } + return 0 +} + +func (x *UpdateInstanceResponse) GetTotalPods() int32 { + if x != nil { + return x.TotalPods + } + return 0 +} + +func (x *UpdateInstanceResponse) GetActivePods() int32 { + if x != nil { + return x.ActivePods + } + return 0 +} + +func (x *UpdateInstanceResponse) GetPods() []*PodStatus { + if x != nil { + return x.Pods + } + return nil +} + +func (x *UpdateInstanceResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *UpdateInstanceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *UpdateInstanceResponse) GetInactive() bool { + if x != nil { + return x.Inactive + } + return false +} + +type ScaleServiceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + Service string 
`protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Kubeconfig string `protobuf:"bytes,3,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + SkipReadyCheck bool `protobuf:"varint,4,opt,name=skip_ready_check,json=skipReadyCheck,proto3" json:"skip_ready_check,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,5,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScaleServiceRequest) Reset() { + *x = ScaleServiceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScaleServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleServiceRequest) ProtoMessage() {} + +func (x *ScaleServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleServiceRequest.ProtoReflect.Descriptor instead. 
+func (*ScaleServiceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{23} +} + +func (x *ScaleServiceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *ScaleServiceRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ScaleServiceRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *ScaleServiceRequest) GetSkipReadyCheck() bool { + if x != nil { + return x.SkipReadyCheck + } + return false +} + +func (x *ScaleServiceRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +type ScaleServiceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Complete bool `protobuf:"varint,1,opt,name=complete,proto3" json:"complete,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + // Deployment rollout progress + ReadyReplicas int32 `protobuf:"varint,3,opt,name=ready_replicas,json=readyReplicas,proto3" json:"ready_replicas,omitempty"` + DesiredReplicas int32 `protobuf:"varint,4,opt,name=desired_replicas,json=desiredReplicas,proto3" json:"desired_replicas,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScaleServiceResponse) Reset() { + *x = ScaleServiceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScaleServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleServiceResponse) ProtoMessage() {} + +func (x *ScaleServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use ScaleServiceResponse.ProtoReflect.Descriptor instead. +func (*ScaleServiceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{24} +} + +func (x *ScaleServiceResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *ScaleServiceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *ScaleServiceResponse) GetReadyReplicas() int32 { + if x != nil { + return x.ReadyReplicas + } + return 0 +} + +func (x *ScaleServiceResponse) GetDesiredReplicas() int32 { + if x != nil { + return x.DesiredReplicas + } + return 0 +} + +type StartInstanceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + SkipReadyCheck bool `protobuf:"varint,3,opt,name=skip_ready_check,json=skipReadyCheck,proto3" json:"skip_ready_check,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,4,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartInstanceRequest) Reset() { + *x = StartInstanceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartInstanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartInstanceRequest) ProtoMessage() {} + +func (x *StartInstanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[25] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartInstanceRequest.ProtoReflect.Descriptor instead. +func (*StartInstanceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{25} +} + +func (x *StartInstanceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *StartInstanceRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *StartInstanceRequest) GetSkipReadyCheck() bool { + if x != nil { + return x.SkipReadyCheck + } + return false +} + +func (x *StartInstanceRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +func (x *StartInstanceRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type StartInstanceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Healthy bool `protobuf:"varint,1,opt,name=healthy,proto3" json:"healthy,omitempty"` + ReadyPods int32 `protobuf:"varint,2,opt,name=ready_pods,json=readyPods,proto3" json:"ready_pods,omitempty"` + TotalPods int32 `protobuf:"varint,3,opt,name=total_pods,json=totalPods,proto3" json:"total_pods,omitempty"` + ActivePods int32 `protobuf:"varint,4,opt,name=active_pods,json=activePods,proto3" json:"active_pods,omitempty"` + Pods []*PodStatus `protobuf:"bytes,5,rep,name=pods,proto3" json:"pods,omitempty"` + Complete bool `protobuf:"varint,6,opt,name=complete,proto3" json:"complete,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartInstanceResponse) Reset() { + *x = StartInstanceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *StartInstanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartInstanceResponse) ProtoMessage() {} + +func (x *StartInstanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartInstanceResponse.ProtoReflect.Descriptor instead. +func (*StartInstanceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{26} +} + +func (x *StartInstanceResponse) GetHealthy() bool { + if x != nil { + return x.Healthy + } + return false +} + +func (x *StartInstanceResponse) GetReadyPods() int32 { + if x != nil { + return x.ReadyPods + } + return 0 +} + +func (x *StartInstanceResponse) GetTotalPods() int32 { + if x != nil { + return x.TotalPods + } + return 0 +} + +func (x *StartInstanceResponse) GetActivePods() int32 { + if x != nil { + return x.ActivePods + } + return 0 +} + +func (x *StartInstanceResponse) GetPods() []*PodStatus { + if x != nil { + return x.Pods + } + return nil +} + +func (x *StartInstanceResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *StartInstanceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type StopInstanceRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstanceDir string `protobuf:"bytes,1,opt,name=instance_dir,json=instanceDir,proto3" json:"instance_dir,omitempty"` + Kubeconfig string `protobuf:"bytes,2,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,3,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x 
*StopInstanceRequest) Reset() { + *x = StopInstanceRequest{} + mi := &file_proto_osmanage_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopInstanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopInstanceRequest) ProtoMessage() {} + +func (x *StopInstanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopInstanceRequest.ProtoReflect.Descriptor instead. +func (*StopInstanceRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{27} +} + +func (x *StopInstanceRequest) GetInstanceDir() string { + if x != nil { + return x.InstanceDir + } + return "" +} + +func (x *StopInstanceRequest) GetKubeconfig() string { + if x != nil { + return x.Kubeconfig + } + return "" +} + +func (x *StopInstanceRequest) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +type StopInstanceResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Complete bool `protobuf:"varint,1,opt,name=complete,proto3" json:"complete,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + ElapsedSeconds int32 `protobuf:"varint,3,opt,name=elapsed_seconds,json=elapsedSeconds,proto3" json:"elapsed_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopInstanceResponse) Reset() { + *x = StopInstanceResponse{} + mi := &file_proto_osmanage_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopInstanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopInstanceResponse) 
ProtoMessage() {} + +func (x *StopInstanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopInstanceResponse.ProtoReflect.Descriptor instead. +func (*StopInstanceResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{28} +} + +func (x *StopInstanceResponse) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *StopInstanceResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *StopInstanceResponse) GetElapsedSeconds() int32 { + if x != nil { + return x.ElapsedSeconds + } + return 0 +} + +// PostgreSQL database connection configuration for datastore access +type DatabaseConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"` + User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"` + PasswordFile string `protobuf:"bytes,5,opt,name=password_file,json=passwordFile,proto3" json:"password_file,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DatabaseConfig) Reset() { + *x = DatabaseConfig{} + mi := &file_proto_osmanage_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DatabaseConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatabaseConfig) ProtoMessage() {} + +func (x *DatabaseConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[29] + if x != nil { + ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatabaseConfig.ProtoReflect.Descriptor instead. +func (*DatabaseConfig) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{29} +} + +func (x *DatabaseConfig) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *DatabaseConfig) GetPort() string { + if x != nil { + return x.Port + } + return "" +} + +func (x *DatabaseConfig) GetUser() string { + if x != nil { + return x.User + } + return "" +} + +func (x *DatabaseConfig) GetDatabase() string { + if x != nil { + return x.Database + } + return "" +} + +func (x *DatabaseConfig) GetPasswordFile() string { + if x != nil { + return x.PasswordFile + } + return "" +} + +// Query parameters for filtering and selecting datastore models +type QueryParams struct { + state protoimpl.MessageState `protogen:"open.v1"` + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` // Collection to query: "user", "meeting", or "organization" + Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` // Fields to return (empty = all fields) + ExistsOnly bool `protobuf:"varint,3,opt,name=exists_only,json=existsOnly,proto3" json:"exists_only,omitempty"` // Return only existence check (bool) instead of data + SimpleFilter map[string]string `protobuf:"bytes,4,rep,name=simple_filter,json=simpleFilter,proto3" json:"simple_filter,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Simple key=value filters (AND'd together) + RawFilter []byte `protobuf:"bytes,5,opt,name=raw_filter,json=rawFilter,proto3" json:"raw_filter,omitempty"` // Complex JSON filter (mutually exclusive with simple_filter) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QueryParams) Reset() { + 
*x = QueryParams{} + mi := &file_proto_osmanage_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QueryParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParams) ProtoMessage() {} + +func (x *QueryParams) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParams.ProtoReflect.Descriptor instead. +func (*QueryParams) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{30} +} + +func (x *QueryParams) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *QueryParams) GetFields() []string { + if x != nil { + return x.Fields + } + return nil +} + +func (x *QueryParams) GetExistsOnly() bool { + if x != nil { + return x.ExistsOnly + } + return false +} + +func (x *QueryParams) GetSimpleFilter() map[string]string { + if x != nil { + return x.SimpleFilter + } + return nil +} + +func (x *QueryParams) GetRawFilter() []byte { + if x != nil { + return x.RawFilter + } + return nil +} + +type GetCollectionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DbConfig *DatabaseConfig `protobuf:"bytes,1,opt,name=db_config,json=dbConfig,proto3" json:"db_config,omitempty"` // Database connection configuration + QueryParams *QueryParams `protobuf:"bytes,2,opt,name=query_params,json=queryParams,proto3" json:"query_params,omitempty"` // Query filtering and field selection + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetCollectionRequest) Reset() { + *x = GetCollectionRequest{} + mi := &file_proto_osmanage_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*GetCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCollectionRequest) ProtoMessage() {} + +func (x *GetCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCollectionRequest.ProtoReflect.Descriptor instead. +func (*GetCollectionRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{31} +} + +func (x *GetCollectionRequest) GetDbConfig() *DatabaseConfig { + if x != nil { + return x.DbConfig + } + return nil +} + +func (x *GetCollectionRequest) GetQueryParams() *QueryParams { + if x != nil { + return x.QueryParams + } + return nil +} + +type GetCollectionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` // Empty on success + // Types that are valid to be assigned to Result: + // + // *GetCollectionResponse_Exists + // *GetCollectionResponse_JsonData + Result isGetCollectionResponse_Result `protobuf_oneof:"result"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetCollectionResponse) Reset() { + *x = GetCollectionResponse{} + mi := &file_proto_osmanage_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCollectionResponse) ProtoMessage() {} + +func (x *GetCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCollectionResponse.ProtoReflect.Descriptor instead. +func (*GetCollectionResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{32} +} + +func (x *GetCollectionResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *GetCollectionResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *GetCollectionResponse) GetResult() isGetCollectionResponse_Result { + if x != nil { + return x.Result + } + return nil +} + +func (x *GetCollectionResponse) GetExists() bool { + if x != nil { + if x, ok := x.Result.(*GetCollectionResponse_Exists); ok { + return x.Exists + } + } + return false +} + +func (x *GetCollectionResponse) GetJsonData() []byte { + if x != nil { + if x, ok := x.Result.(*GetCollectionResponse_JsonData); ok { + return x.JsonData + } + } + return nil +} + +type isGetCollectionResponse_Result interface { + isGetCollectionResponse_Result() +} + +type GetCollectionResponse_Exists struct { + Exists bool `protobuf:"varint,3,opt,name=exists,proto3,oneof"` // For exists_only queries +} + +type GetCollectionResponse_JsonData struct { + JsonData []byte `protobuf:"bytes,4,opt,name=json_data,json=jsonData,proto3,oneof"` // JSON-encoded model data +} + +func (*GetCollectionResponse_Exists) isGetCollectionResponse_Result() {} + +func (*GetCollectionResponse_JsonData) isGetCollectionResponse_Result() {} + +type MigrationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AddressBackendmanage string `protobuf:"bytes,1,opt,name=address_backendmanage,json=addressBackendmanage,proto3" json:"address_backendmanage,omitempty"` + PasswordFilePath string `protobuf:"bytes,2,opt,name=password_file_path,json=passwordFilePath,proto3" json:"password_file_path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache +} + +func (x *MigrationsRequest) Reset() { + *x = MigrationsRequest{} + mi := &file_proto_osmanage_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrationsRequest) ProtoMessage() {} + +func (x *MigrationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrationsRequest.ProtoReflect.Descriptor instead. +func (*MigrationsRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{33} +} + +func (x *MigrationsRequest) GetAddressBackendmanage() string { + if x != nil { + return x.AddressBackendmanage + } + return "" +} + +func (x *MigrationsRequest) GetPasswordFilePath() string { + if x != nil { + return x.PasswordFilePath + } + return "" +} + +type MigrationsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Output string `protobuf:"bytes,3,opt,name=output,proto3" json:"output,omitempty"` + Exception string `protobuf:"bytes,4,opt,name=exception,proto3" json:"exception,omitempty"` + Stats string `protobuf:"bytes,5,opt,name=stats,proto3" json:"stats,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrationsResponse) Reset() { + *x = MigrationsResponse{} + mi := &file_proto_osmanage_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) 
+} + +func (*MigrationsResponse) ProtoMessage() {} + +func (x *MigrationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrationsResponse.ProtoReflect.Descriptor instead. +func (*MigrationsResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{34} +} + +func (x *MigrationsResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *MigrationsResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *MigrationsResponse) GetOutput() string { + if x != nil { + return x.Output + } + return "" +} + +func (x *MigrationsResponse) GetException() string { + if x != nil { + return x.Exception + } + return "" +} + +func (x *MigrationsResponse) GetStats() string { + if x != nil { + return x.Stats + } + return "" +} + +type MigrationsProgressResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Running bool `protobuf:"varint,2,opt,name=running,proto3" json:"running,omitempty"` + Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"` + Exception string `protobuf:"bytes,4,opt,name=exception,proto3" json:"exception,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrationsProgressResponse) Reset() { + *x = MigrationsProgressResponse{} + mi := &file_proto_osmanage_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrationsProgressResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrationsProgressResponse) ProtoMessage() {} + +func (x 
*MigrationsProgressResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrationsProgressResponse.ProtoReflect.Descriptor instead. +func (*MigrationsProgressResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{35} +} + +func (x *MigrationsProgressResponse) GetOutput() string { + if x != nil { + return x.Output + } + return "" +} + +func (x *MigrationsProgressResponse) GetRunning() bool { + if x != nil { + return x.Running + } + return false +} + +func (x *MigrationsProgressResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *MigrationsProgressResponse) GetException() string { + if x != nil { + return x.Exception + } + return "" +} + +type SendManageActionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AddressBackendmanage string `protobuf:"bytes,1,opt,name=address_backendmanage,json=addressBackendmanage,proto3" json:"address_backendmanage,omitempty"` + PasswordFilePath string `protobuf:"bytes,2,opt,name=password_file_path,json=passwordFilePath,proto3" json:"password_file_path,omitempty"` + Action string `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` + Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendManageActionRequest) Reset() { + *x = SendManageActionRequest{} + mi := &file_proto_osmanage_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendManageActionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendManageActionRequest) ProtoMessage() {} + +func (x 
*SendManageActionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendManageActionRequest.ProtoReflect.Descriptor instead. +func (*SendManageActionRequest) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{36} +} + +func (x *SendManageActionRequest) GetAddressBackendmanage() string { + if x != nil { + return x.AddressBackendmanage + } + return "" +} + +func (x *SendManageActionRequest) GetPasswordFilePath() string { + if x != nil { + return x.PasswordFilePath + } + return "" +} + +func (x *SendManageActionRequest) GetAction() string { + if x != nil { + return x.Action + } + return "" +} + +func (x *SendManageActionRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type SendManageActionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendManageActionResponse) Reset() { + *x = SendManageActionResponse{} + mi := &file_proto_osmanage_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendManageActionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendManageActionResponse) ProtoMessage() {} + +func (x *SendManageActionResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_osmanage_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendManageActionResponse.ProtoReflect.Descriptor instead. +func (*SendManageActionResponse) Descriptor() ([]byte, []int) { + return file_proto_osmanage_proto_rawDescGZIP(), []int{37} +} + +func (x *SendManageActionResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *SendManageActionResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *SendManageActionResponse) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +var File_proto_osmanage_proto protoreflect.FileDescriptor + +const file_proto_osmanage_proto_rawDesc = "" + + "\n" + + "\x14proto/osmanage.proto\x12\bosmanage\"\xb0\x01\n" + + "\x15InstanceConfigRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12.\n" + + "\x13stack_template_path\x18\x02 \x01(\tR\x11stackTemplatePath\x12\x18\n" + + "\aconfigs\x18\x03 \x03(\fR\aconfigs\x12\x14\n" + + "\x05force\x18\x04 \x01(\bR\x05force\x12\x14\n" + + "\x05clean\x18\x05 \x01(\bR\x05clean\"H\n" + + "\x16InstanceConfigResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"\x8c\x01\n" + + "\x15CreateInstanceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x1f\n" + + "\vdb_password\x18\x02 \x01(\tR\n" + + "dbPassword\x12/\n" + + "\x13superadmin_password\x18\x03 \x01(\tR\x12superadminPassword\"H\n" + + "\x16CreateInstanceResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"P\n" + + "\x15RemoveInstanceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x14\n" + + "\x05force\x18\x02 \x01(\bR\x05force\"H\n" + + "\x16RemoveInstanceResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"^\n" + + 
"\x19GetNamespaceExistsRequest\x12!\n" + + "\finstance_url\x18\x01 \x01(\tR\vinstanceUrl\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\"J\n" + + "\x1aGetNamespaceExistsResponse\x12\x16\n" + + "\x06exists\x18\x01 \x01(\bR\x06exists\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"9\n" + + "\x17GetClusterStatusRequest\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x01 \x01(\tR\n" + + "kubeconfig\"t\n" + + "\x18GetClusterStatusResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\tR\x06status\x12\x1f\n" + + "\vtotal_nodes\x18\x02 \x01(\x05R\n" + + "totalNodes\x12\x1f\n" + + "\vready_nodes\x18\x03 \x01(\x05R\n" + + "readyNodes\"\x80\x01\n" + + "\x18GetServiceAddressRequest\x12!\n" + + "\finstance_url\x18\x01 \x01(\tR\vinstanceUrl\x12!\n" + + "\fservice_name\x18\x02 \x01(\tR\vserviceName\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x03 \x01(\tR\n" + + "kubeconfig\"K\n" + + "\x19GetServiceAddressResponse\x12\x18\n" + + "\aaddress\x18\x01 \x01(\tR\aaddress\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"\x9a\x01\n" + + "\x18GetInstanceHealthRequest\x12!\n" + + "\finstance_url\x18\x01 \x01(\tR\vinstanceUrl\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\x12\x12\n" + + "\x04wait\x18\x03 \x01(\bR\x04wait\x12'\n" + + "\x0ftimeout_seconds\x18\x04 \x01(\x05R\x0etimeoutSeconds\"\xef\x01\n" + + "\x19GetInstanceHealthResponse\x12\x18\n" + + "\ahealthy\x18\x01 \x01(\bR\ahealthy\x12\x1d\n" + + "\n" + + "ready_pods\x18\x02 \x01(\x05R\treadyPods\x12\x1d\n" + + "\n" + + "total_pods\x18\x03 \x01(\x05R\ttotalPods\x12\x1f\n" + + "\vactive_pods\x18\x04 \x01(\x05R\n" + + "activePods\x12'\n" + + "\x04pods\x18\x05 \x03(\v2\x13.osmanage.PodStatusR\x04pods\x12\x1a\n" + + "\bcomplete\x18\x06 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\a \x01(\tR\x05error\"K\n" + + "\tPodStatus\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05phase\x18\x02 \x01(\tR\x05phase\x12\x14\n" + + "\x05ready\x18\x03 \x01(\bR\x05ready\"\x95\x02\n" + + 
"\x0fContainerStatus\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\x12-\n" + + "\x12container_registry\x18\x03 \x01(\tR\x11containerRegistry\x12\x14\n" + + "\x05ready\x18\x04 \x01(\bR\x05ready\x12\x18\n" + + "\astarted\x18\x05 \x01(\bR\astarted\x12A\n" + + "\benv_vars\x18\x06 \x03(\v2&.osmanage.ContainerStatus.EnvVarsEntryR\aenvVars\x1a:\n" + + "\fEnvVarsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x90\x01\n" + + "\x11InstancePodStatus\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\aservice\x18\x02 \x01(\tR\aservice\x12\x12\n" + + "\x04node\x18\x03 \x01(\tR\x04node\x129\n" + + "\n" + + "containers\x18\x04 \x03(\v2\x19.osmanage.ContainerStatusR\n" + + "containers\"]\n" + + "\x18GetInstanceStatusRequest\x12!\n" + + "\finstance_url\x18\x01 \x01(\tR\vinstanceUrl\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\"\x98\x02\n" + + "\x19GetInstanceStatusResponse\x12/\n" + + "\x04pods\x18\x01 \x03(\v2\x1b.osmanage.InstancePodStatusR\x04pods\x12]\n" + + "\x0eservice_counts\x18\x02 \x03(\v26.osmanage.GetInstanceStatusResponse.ServiceCountsEntryR\rserviceCounts\x12)\n" + + "\x10namespace_exists\x18\x03 \x01(\bR\x0fnamespaceExists\x1a@\n" + + "\x12ServiceCountsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value:\x028\x01\"\xc9\x01\n" + + "\x1aUpdateBackendmanageRequest\x12!\n" + + "\finstance_url\x18\x01 \x01(\tR\vinstanceUrl\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\x12\x10\n" + + "\x03tag\x18\x03 \x01(\tR\x03tag\x12-\n" + + "\x12container_registry\x18\x04 \x01(\tR\x11containerRegistry\x12'\n" + + "\x0ftimeout_seconds\x18\x05 \x01(\x05R\x0etimeoutSeconds\"\xa1\x01\n" + + "\x1bUpdateBackendmanageResponse\x12\x1a\n" + + "\bcomplete\x18\x01 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\x12%\n" + + 
"\x0eready_replicas\x18\x03 \x01(\x05R\rreadyReplicas\x12)\n" + + "\x10desired_replicas\x18\x04 \x01(\x05R\x0fdesiredReplicas\"\xad\x01\n" + + "\x15UpdateInstanceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\x12(\n" + + "\x10skip_ready_check\x18\x03 \x01(\bR\x0eskipReadyCheck\x12'\n" + + "\x0ftimeout_seconds\x18\x04 \x01(\x05R\x0etimeoutSeconds\"\x88\x02\n" + + "\x16UpdateInstanceResponse\x12\x18\n" + + "\ahealthy\x18\x01 \x01(\bR\ahealthy\x12\x1d\n" + + "\n" + + "ready_pods\x18\x02 \x01(\x05R\treadyPods\x12\x1d\n" + + "\n" + + "total_pods\x18\x03 \x01(\x05R\ttotalPods\x12\x1f\n" + + "\vactive_pods\x18\x04 \x01(\x05R\n" + + "activePods\x12'\n" + + "\x04pods\x18\x05 \x03(\v2\x13.osmanage.PodStatusR\x04pods\x12\x1a\n" + + "\bcomplete\x18\x06 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\a \x01(\tR\x05error\x12\x1a\n" + + "\binactive\x18\b \x01(\bR\binactive\"\xc5\x01\n" + + "\x13ScaleServiceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x18\n" + + "\aservice\x18\x02 \x01(\tR\aservice\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x03 \x01(\tR\n" + + "kubeconfig\x12(\n" + + "\x10skip_ready_check\x18\x04 \x01(\bR\x0eskipReadyCheck\x12'\n" + + "\x0ftimeout_seconds\x18\x05 \x01(\x05R\x0etimeoutSeconds\"\x9a\x01\n" + + "\x14ScaleServiceResponse\x12\x1a\n" + + "\bcomplete\x18\x01 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\x12%\n" + + "\x0eready_replicas\x18\x03 \x01(\x05R\rreadyReplicas\x12)\n" + + "\x10desired_replicas\x18\x04 \x01(\x05R\x0fdesiredReplicas\"\xab\x02\n" + + "\x14StartInstanceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\x12(\n" + + "\x10skip_ready_check\x18\x03 \x01(\bR\x0eskipReadyCheck\x12'\n" + + "\x0ftimeout_seconds\x18\x04 \x01(\x05R\x0etimeoutSeconds\x12B\n" + + "\x06labels\x18\x05 
\x03(\v2*.osmanage.StartInstanceRequest.LabelsEntryR\x06labels\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xeb\x01\n" + + "\x15StartInstanceResponse\x12\x18\n" + + "\ahealthy\x18\x01 \x01(\bR\ahealthy\x12\x1d\n" + + "\n" + + "ready_pods\x18\x02 \x01(\x05R\treadyPods\x12\x1d\n" + + "\n" + + "total_pods\x18\x03 \x01(\x05R\ttotalPods\x12\x1f\n" + + "\vactive_pods\x18\x04 \x01(\x05R\n" + + "activePods\x12'\n" + + "\x04pods\x18\x05 \x03(\v2\x13.osmanage.PodStatusR\x04pods\x12\x1a\n" + + "\bcomplete\x18\x06 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\a \x01(\tR\x05error\"\x81\x01\n" + + "\x13StopInstanceRequest\x12!\n" + + "\finstance_dir\x18\x01 \x01(\tR\vinstanceDir\x12\x1e\n" + + "\n" + + "kubeconfig\x18\x02 \x01(\tR\n" + + "kubeconfig\x12'\n" + + "\x0ftimeout_seconds\x18\x03 \x01(\x05R\x0etimeoutSeconds\"q\n" + + "\x14StopInstanceResponse\x12\x1a\n" + + "\bcomplete\x18\x01 \x01(\bR\bcomplete\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\x12'\n" + + "\x0felapsed_seconds\x18\x03 \x01(\x05R\x0eelapsedSeconds\"\x8d\x01\n" + + "\x0eDatabaseConfig\x12\x12\n" + + "\x04host\x18\x01 \x01(\tR\x04host\x12\x12\n" + + "\x04port\x18\x02 \x01(\tR\x04port\x12\x12\n" + + "\x04user\x18\x03 \x01(\tR\x04user\x12\x1a\n" + + "\bdatabase\x18\x04 \x01(\tR\bdatabase\x12#\n" + + "\rpassword_file\x18\x05 \x01(\tR\fpasswordFile\"\x94\x02\n" + + "\vQueryParams\x12\x1e\n" + + "\n" + + "collection\x18\x01 \x01(\tR\n" + + "collection\x12\x16\n" + + "\x06fields\x18\x02 \x03(\tR\x06fields\x12\x1f\n" + + "\vexists_only\x18\x03 \x01(\bR\n" + + "existsOnly\x12L\n" + + "\rsimple_filter\x18\x04 \x03(\v2'.osmanage.QueryParams.SimpleFilterEntryR\fsimpleFilter\x12\x1d\n" + + "\n" + + "raw_filter\x18\x05 \x01(\fR\trawFilter\x1a?\n" + + "\x11SimpleFilterEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x87\x01\n" + + 
"\x14GetCollectionRequest\x125\n" + + "\tdb_config\x18\x01 \x01(\v2\x18.osmanage.DatabaseConfigR\bdbConfig\x128\n" + + "\fquery_params\x18\x02 \x01(\v2\x15.osmanage.QueryParamsR\vqueryParams\"\x8a\x01\n" + + "\x15GetCollectionResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\x12\x18\n" + + "\x06exists\x18\x03 \x01(\bH\x00R\x06exists\x12\x1d\n" + + "\tjson_data\x18\x04 \x01(\fH\x00R\bjsonDataB\b\n" + + "\x06result\"v\n" + + "\x11MigrationsRequest\x123\n" + + "\x15address_backendmanage\x18\x01 \x01(\tR\x14addressBackendmanage\x12,\n" + + "\x12password_file_path\x18\x02 \x01(\tR\x10passwordFilePath\"\x92\x01\n" + + "\x12MigrationsResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x16\n" + + "\x06status\x18\x02 \x01(\tR\x06status\x12\x16\n" + + "\x06output\x18\x03 \x01(\tR\x06output\x12\x1c\n" + + "\texception\x18\x04 \x01(\tR\texception\x12\x14\n" + + "\x05stats\x18\x05 \x01(\tR\x05stats\"\x86\x01\n" + + "\x1aMigrationsProgressResponse\x12\x16\n" + + "\x06output\x18\x01 \x01(\tR\x06output\x12\x18\n" + + "\arunning\x18\x02 \x01(\bR\arunning\x12\x18\n" + + "\asuccess\x18\x03 \x01(\bR\asuccess\x12\x1c\n" + + "\texception\x18\x04 \x01(\tR\texception\"\xae\x01\n" + + "\x17SendManageActionRequest\x123\n" + + "\x15address_backendmanage\x18\x01 \x01(\tR\x14addressBackendmanage\x12,\n" + + "\x12password_file_path\x18\x02 \x01(\tR\x10passwordFilePath\x12\x16\n" + + "\x06action\x18\x03 \x01(\tR\x06action\x12\x18\n" + + "\apayload\x18\x04 \x01(\fR\apayload\"^\n" + + "\x18SendManageActionResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\x12\x12\n" + + "\x04body\x18\x03 \x01(\fR\x04body2\xa1\x0f\n" + + "\x0fOsmanageService\x12R\n" + + "\rSetupInstance\x12\x1f.osmanage.InstanceConfigRequest\x1a .osmanage.InstanceConfigResponse\x12S\n" + + "\x0eConfigInstance\x12\x1f.osmanage.InstanceConfigRequest\x1a .osmanage.InstanceConfigResponse\x12S\n" + + 
"\x0eCreateInstance\x12\x1f.osmanage.CreateInstanceRequest\x1a .osmanage.CreateInstanceResponse\x12S\n" + + "\x0eRemoveInstance\x12\x1f.osmanage.RemoveInstanceRequest\x1a .osmanage.RemoveInstanceResponse\x12_\n" + + "\x12GetNamespaceExists\x12#.osmanage.GetNamespaceExistsRequest\x1a$.osmanage.GetNamespaceExistsResponse\x12Y\n" + + "\x10GetClusterStatus\x12!.osmanage.GetClusterStatusRequest\x1a\".osmanage.GetClusterStatusResponse\x12\\\n" + + "\x11GetServiceAddress\x12\".osmanage.GetServiceAddressRequest\x1a#.osmanage.GetServiceAddressResponse\x12^\n" + + "\x11GetInstanceHealth\x12\".osmanage.GetInstanceHealthRequest\x1a#.osmanage.GetInstanceHealthResponse0\x01\x12\\\n" + + "\x11GetInstanceStatus\x12\".osmanage.GetInstanceStatusRequest\x1a#.osmanage.GetInstanceStatusResponse\x12d\n" + + "\x13UpdateBackendmanage\x12$.osmanage.UpdateBackendmanageRequest\x1a%.osmanage.UpdateBackendmanageResponse0\x01\x12U\n" + + "\x0eUpdateInstance\x12\x1f.osmanage.UpdateInstanceRequest\x1a .osmanage.UpdateInstanceResponse0\x01\x12O\n" + + "\fScaleService\x12\x1d.osmanage.ScaleServiceRequest\x1a\x1e.osmanage.ScaleServiceResponse0\x01\x12R\n" + + "\rStartInstance\x12\x1e.osmanage.StartInstanceRequest\x1a\x1f.osmanage.StartInstanceResponse0\x01\x12O\n" + + "\fStopInstance\x12\x1d.osmanage.StopInstanceRequest\x1a\x1e.osmanage.StopInstanceResponse0\x01\x12P\n" + + "\rGetCollection\x12\x1e.osmanage.GetCollectionRequest\x1a\x1f.osmanage.GetCollectionResponse\x12X\n" + + "\x11MigrationsMigrate\x12\x1b.osmanage.MigrationsRequest\x1a$.osmanage.MigrationsProgressResponse0\x01\x12Y\n" + + "\x12MigrationsFinalize\x12\x1b.osmanage.MigrationsRequest\x1a$.osmanage.MigrationsProgressResponse0\x01\x12L\n" + + "\x0fMigrationsReset\x12\x1b.osmanage.MigrationsRequest\x1a\x1c.osmanage.MigrationsResponse\x12a\n" + + "$MigrationsClearCollectionfieldTables\x12\x1b.osmanage.MigrationsRequest\x1a\x1c.osmanage.MigrationsResponse\x12L\n" + + 
"\x0fMigrationsStats\x12\x1b.osmanage.MigrationsRequest\x1a\x1c.osmanage.MigrationsResponse\x12O\n" + + "\x12MigrationsProgress\x12\x1b.osmanage.MigrationsRequest\x1a\x1c.osmanage.MigrationsResponse\x12Y\n" + + "\x10SendManageAction\x12!.osmanage.SendManageActionRequest\x1a\".osmanage.SendManageActionResponseB5Z3github.com/OpenSlides/openslides-cli/proto/osmanageb\x06proto3" + +var ( + file_proto_osmanage_proto_rawDescOnce sync.Once + file_proto_osmanage_proto_rawDescData []byte +) + +func file_proto_osmanage_proto_rawDescGZIP() []byte { + file_proto_osmanage_proto_rawDescOnce.Do(func() { + file_proto_osmanage_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_osmanage_proto_rawDesc), len(file_proto_osmanage_proto_rawDesc))) + }) + return file_proto_osmanage_proto_rawDescData +} + +var file_proto_osmanage_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_proto_osmanage_proto_goTypes = []any{ + (*InstanceConfigRequest)(nil), // 0: osmanage.InstanceConfigRequest + (*InstanceConfigResponse)(nil), // 1: osmanage.InstanceConfigResponse + (*CreateInstanceRequest)(nil), // 2: osmanage.CreateInstanceRequest + (*CreateInstanceResponse)(nil), // 3: osmanage.CreateInstanceResponse + (*RemoveInstanceRequest)(nil), // 4: osmanage.RemoveInstanceRequest + (*RemoveInstanceResponse)(nil), // 5: osmanage.RemoveInstanceResponse + (*GetNamespaceExistsRequest)(nil), // 6: osmanage.GetNamespaceExistsRequest + (*GetNamespaceExistsResponse)(nil), // 7: osmanage.GetNamespaceExistsResponse + (*GetClusterStatusRequest)(nil), // 8: osmanage.GetClusterStatusRequest + (*GetClusterStatusResponse)(nil), // 9: osmanage.GetClusterStatusResponse + (*GetServiceAddressRequest)(nil), // 10: osmanage.GetServiceAddressRequest + (*GetServiceAddressResponse)(nil), // 11: osmanage.GetServiceAddressResponse + (*GetInstanceHealthRequest)(nil), // 12: osmanage.GetInstanceHealthRequest + (*GetInstanceHealthResponse)(nil), // 13: osmanage.GetInstanceHealthResponse + 
(*PodStatus)(nil), // 14: osmanage.PodStatus + (*ContainerStatus)(nil), // 15: osmanage.ContainerStatus + (*InstancePodStatus)(nil), // 16: osmanage.InstancePodStatus + (*GetInstanceStatusRequest)(nil), // 17: osmanage.GetInstanceStatusRequest + (*GetInstanceStatusResponse)(nil), // 18: osmanage.GetInstanceStatusResponse + (*UpdateBackendmanageRequest)(nil), // 19: osmanage.UpdateBackendmanageRequest + (*UpdateBackendmanageResponse)(nil), // 20: osmanage.UpdateBackendmanageResponse + (*UpdateInstanceRequest)(nil), // 21: osmanage.UpdateInstanceRequest + (*UpdateInstanceResponse)(nil), // 22: osmanage.UpdateInstanceResponse + (*ScaleServiceRequest)(nil), // 23: osmanage.ScaleServiceRequest + (*ScaleServiceResponse)(nil), // 24: osmanage.ScaleServiceResponse + (*StartInstanceRequest)(nil), // 25: osmanage.StartInstanceRequest + (*StartInstanceResponse)(nil), // 26: osmanage.StartInstanceResponse + (*StopInstanceRequest)(nil), // 27: osmanage.StopInstanceRequest + (*StopInstanceResponse)(nil), // 28: osmanage.StopInstanceResponse + (*DatabaseConfig)(nil), // 29: osmanage.DatabaseConfig + (*QueryParams)(nil), // 30: osmanage.QueryParams + (*GetCollectionRequest)(nil), // 31: osmanage.GetCollectionRequest + (*GetCollectionResponse)(nil), // 32: osmanage.GetCollectionResponse + (*MigrationsRequest)(nil), // 33: osmanage.MigrationsRequest + (*MigrationsResponse)(nil), // 34: osmanage.MigrationsResponse + (*MigrationsProgressResponse)(nil), // 35: osmanage.MigrationsProgressResponse + (*SendManageActionRequest)(nil), // 36: osmanage.SendManageActionRequest + (*SendManageActionResponse)(nil), // 37: osmanage.SendManageActionResponse + nil, // 38: osmanage.ContainerStatus.EnvVarsEntry + nil, // 39: osmanage.GetInstanceStatusResponse.ServiceCountsEntry + nil, // 40: osmanage.StartInstanceRequest.LabelsEntry + nil, // 41: osmanage.QueryParams.SimpleFilterEntry +} +var file_proto_osmanage_proto_depIdxs = []int32{ + 14, // 0: osmanage.GetInstanceHealthResponse.pods:type_name -> 
osmanage.PodStatus + 38, // 1: osmanage.ContainerStatus.env_vars:type_name -> osmanage.ContainerStatus.EnvVarsEntry + 15, // 2: osmanage.InstancePodStatus.containers:type_name -> osmanage.ContainerStatus + 16, // 3: osmanage.GetInstanceStatusResponse.pods:type_name -> osmanage.InstancePodStatus + 39, // 4: osmanage.GetInstanceStatusResponse.service_counts:type_name -> osmanage.GetInstanceStatusResponse.ServiceCountsEntry + 14, // 5: osmanage.UpdateInstanceResponse.pods:type_name -> osmanage.PodStatus + 40, // 6: osmanage.StartInstanceRequest.labels:type_name -> osmanage.StartInstanceRequest.LabelsEntry + 14, // 7: osmanage.StartInstanceResponse.pods:type_name -> osmanage.PodStatus + 41, // 8: osmanage.QueryParams.simple_filter:type_name -> osmanage.QueryParams.SimpleFilterEntry + 29, // 9: osmanage.GetCollectionRequest.db_config:type_name -> osmanage.DatabaseConfig + 30, // 10: osmanage.GetCollectionRequest.query_params:type_name -> osmanage.QueryParams + 0, // 11: osmanage.OsmanageService.SetupInstance:input_type -> osmanage.InstanceConfigRequest + 0, // 12: osmanage.OsmanageService.ConfigInstance:input_type -> osmanage.InstanceConfigRequest + 2, // 13: osmanage.OsmanageService.CreateInstance:input_type -> osmanage.CreateInstanceRequest + 4, // 14: osmanage.OsmanageService.RemoveInstance:input_type -> osmanage.RemoveInstanceRequest + 6, // 15: osmanage.OsmanageService.GetNamespaceExists:input_type -> osmanage.GetNamespaceExistsRequest + 8, // 16: osmanage.OsmanageService.GetClusterStatus:input_type -> osmanage.GetClusterStatusRequest + 10, // 17: osmanage.OsmanageService.GetServiceAddress:input_type -> osmanage.GetServiceAddressRequest + 12, // 18: osmanage.OsmanageService.GetInstanceHealth:input_type -> osmanage.GetInstanceHealthRequest + 17, // 19: osmanage.OsmanageService.GetInstanceStatus:input_type -> osmanage.GetInstanceStatusRequest + 19, // 20: osmanage.OsmanageService.UpdateBackendmanage:input_type -> osmanage.UpdateBackendmanageRequest + 21, // 21: 
osmanage.OsmanageService.UpdateInstance:input_type -> osmanage.UpdateInstanceRequest + 23, // 22: osmanage.OsmanageService.ScaleService:input_type -> osmanage.ScaleServiceRequest + 25, // 23: osmanage.OsmanageService.StartInstance:input_type -> osmanage.StartInstanceRequest + 27, // 24: osmanage.OsmanageService.StopInstance:input_type -> osmanage.StopInstanceRequest + 31, // 25: osmanage.OsmanageService.GetCollection:input_type -> osmanage.GetCollectionRequest + 33, // 26: osmanage.OsmanageService.MigrationsMigrate:input_type -> osmanage.MigrationsRequest + 33, // 27: osmanage.OsmanageService.MigrationsFinalize:input_type -> osmanage.MigrationsRequest + 33, // 28: osmanage.OsmanageService.MigrationsReset:input_type -> osmanage.MigrationsRequest + 33, // 29: osmanage.OsmanageService.MigrationsClearCollectionfieldTables:input_type -> osmanage.MigrationsRequest + 33, // 30: osmanage.OsmanageService.MigrationsStats:input_type -> osmanage.MigrationsRequest + 33, // 31: osmanage.OsmanageService.MigrationsProgress:input_type -> osmanage.MigrationsRequest + 36, // 32: osmanage.OsmanageService.SendManageAction:input_type -> osmanage.SendManageActionRequest + 1, // 33: osmanage.OsmanageService.SetupInstance:output_type -> osmanage.InstanceConfigResponse + 1, // 34: osmanage.OsmanageService.ConfigInstance:output_type -> osmanage.InstanceConfigResponse + 3, // 35: osmanage.OsmanageService.CreateInstance:output_type -> osmanage.CreateInstanceResponse + 5, // 36: osmanage.OsmanageService.RemoveInstance:output_type -> osmanage.RemoveInstanceResponse + 7, // 37: osmanage.OsmanageService.GetNamespaceExists:output_type -> osmanage.GetNamespaceExistsResponse + 9, // 38: osmanage.OsmanageService.GetClusterStatus:output_type -> osmanage.GetClusterStatusResponse + 11, // 39: osmanage.OsmanageService.GetServiceAddress:output_type -> osmanage.GetServiceAddressResponse + 13, // 40: osmanage.OsmanageService.GetInstanceHealth:output_type -> osmanage.GetInstanceHealthResponse + 18, // 41: 
osmanage.OsmanageService.GetInstanceStatus:output_type -> osmanage.GetInstanceStatusResponse + 20, // 42: osmanage.OsmanageService.UpdateBackendmanage:output_type -> osmanage.UpdateBackendmanageResponse + 22, // 43: osmanage.OsmanageService.UpdateInstance:output_type -> osmanage.UpdateInstanceResponse + 24, // 44: osmanage.OsmanageService.ScaleService:output_type -> osmanage.ScaleServiceResponse + 26, // 45: osmanage.OsmanageService.StartInstance:output_type -> osmanage.StartInstanceResponse + 28, // 46: osmanage.OsmanageService.StopInstance:output_type -> osmanage.StopInstanceResponse + 32, // 47: osmanage.OsmanageService.GetCollection:output_type -> osmanage.GetCollectionResponse + 35, // 48: osmanage.OsmanageService.MigrationsMigrate:output_type -> osmanage.MigrationsProgressResponse + 35, // 49: osmanage.OsmanageService.MigrationsFinalize:output_type -> osmanage.MigrationsProgressResponse + 34, // 50: osmanage.OsmanageService.MigrationsReset:output_type -> osmanage.MigrationsResponse + 34, // 51: osmanage.OsmanageService.MigrationsClearCollectionfieldTables:output_type -> osmanage.MigrationsResponse + 34, // 52: osmanage.OsmanageService.MigrationsStats:output_type -> osmanage.MigrationsResponse + 34, // 53: osmanage.OsmanageService.MigrationsProgress:output_type -> osmanage.MigrationsResponse + 37, // 54: osmanage.OsmanageService.SendManageAction:output_type -> osmanage.SendManageActionResponse + 33, // [33:55] is the sub-list for method output_type + 11, // [11:33] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_proto_osmanage_proto_init() } +func file_proto_osmanage_proto_init() { + if File_proto_osmanage_proto != nil { + return + } + file_proto_osmanage_proto_msgTypes[32].OneofWrappers = []any{ + (*GetCollectionResponse_Exists)(nil), + (*GetCollectionResponse_JsonData)(nil), + } + 
type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_osmanage_proto_rawDesc), len(file_proto_osmanage_proto_rawDesc)), + NumEnums: 0, + NumMessages: 42, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_osmanage_proto_goTypes, + DependencyIndexes: file_proto_osmanage_proto_depIdxs, + MessageInfos: file_proto_osmanage_proto_msgTypes, + }.Build() + File_proto_osmanage_proto = out.File + file_proto_osmanage_proto_goTypes = nil + file_proto_osmanage_proto_depIdxs = nil +} diff --git a/proto/osmanage/osmanage_grpc.pb.go b/proto/osmanage/osmanage_grpc.pb.go new file mode 100644 index 0000000..b014f00 --- /dev/null +++ b/proto/osmanage/osmanage_grpc.pb.go @@ -0,0 +1,956 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc v4.25.1 +// source: proto/osmanage.proto + +package osmanage + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + OsmanageService_SetupInstance_FullMethodName = "/osmanage.OsmanageService/SetupInstance" + OsmanageService_ConfigInstance_FullMethodName = "/osmanage.OsmanageService/ConfigInstance" + OsmanageService_CreateInstance_FullMethodName = "/osmanage.OsmanageService/CreateInstance" + OsmanageService_RemoveInstance_FullMethodName = "/osmanage.OsmanageService/RemoveInstance" + OsmanageService_GetNamespaceExists_FullMethodName = "/osmanage.OsmanageService/GetNamespaceExists" + OsmanageService_GetClusterStatus_FullMethodName = "/osmanage.OsmanageService/GetClusterStatus" + OsmanageService_GetServiceAddress_FullMethodName = "/osmanage.OsmanageService/GetServiceAddress" + OsmanageService_GetInstanceHealth_FullMethodName = "/osmanage.OsmanageService/GetInstanceHealth" + OsmanageService_GetInstanceStatus_FullMethodName = "/osmanage.OsmanageService/GetInstanceStatus" + OsmanageService_UpdateBackendmanage_FullMethodName = "/osmanage.OsmanageService/UpdateBackendmanage" + OsmanageService_UpdateInstance_FullMethodName = "/osmanage.OsmanageService/UpdateInstance" + OsmanageService_ScaleService_FullMethodName = "/osmanage.OsmanageService/ScaleService" + OsmanageService_StartInstance_FullMethodName = "/osmanage.OsmanageService/StartInstance" + OsmanageService_StopInstance_FullMethodName = "/osmanage.OsmanageService/StopInstance" + OsmanageService_GetCollection_FullMethodName = "/osmanage.OsmanageService/GetCollection" + OsmanageService_MigrationsMigrate_FullMethodName = "/osmanage.OsmanageService/MigrationsMigrate" + OsmanageService_MigrationsFinalize_FullMethodName = "/osmanage.OsmanageService/MigrationsFinalize" + OsmanageService_MigrationsReset_FullMethodName = "/osmanage.OsmanageService/MigrationsReset" + OsmanageService_MigrationsClearCollectionfieldTables_FullMethodName = "/osmanage.OsmanageService/MigrationsClearCollectionfieldTables" + OsmanageService_MigrationsStats_FullMethodName = "/osmanage.OsmanageService/MigrationsStats" 
+ OsmanageService_MigrationsProgress_FullMethodName = "/osmanage.OsmanageService/MigrationsProgress" + OsmanageService_SendManageAction_FullMethodName = "/osmanage.OsmanageService/SendManageAction" +) + +// OsmanageServiceClient is the client API for OsmanageService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OsmanageServiceClient interface { + // Instance filesystem operations + SetupInstance(ctx context.Context, in *InstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfigResponse, error) + ConfigInstance(ctx context.Context, in *InstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfigResponse, error) + CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*CreateInstanceResponse, error) + RemoveInstance(ctx context.Context, in *RemoveInstanceRequest, opts ...grpc.CallOption) (*RemoveInstanceResponse, error) + // k8s actions + GetNamespaceExists(ctx context.Context, in *GetNamespaceExistsRequest, opts ...grpc.CallOption) (*GetNamespaceExistsResponse, error) + GetClusterStatus(ctx context.Context, in *GetClusterStatusRequest, opts ...grpc.CallOption) (*GetClusterStatusResponse, error) + GetServiceAddress(ctx context.Context, in *GetServiceAddressRequest, opts ...grpc.CallOption) (*GetServiceAddressResponse, error) + GetInstanceHealth(ctx context.Context, in *GetInstanceHealthRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetInstanceHealthResponse], error) + GetInstanceStatus(ctx context.Context, in *GetInstanceStatusRequest, opts ...grpc.CallOption) (*GetInstanceStatusResponse, error) + UpdateBackendmanage(ctx context.Context, in *UpdateBackendmanageRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[UpdateBackendmanageResponse], error) + UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) 
(grpc.ServerStreamingClient[UpdateInstanceResponse], error) + ScaleService(ctx context.Context, in *ScaleServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ScaleServiceResponse], error) + StartInstance(ctx context.Context, in *StartInstanceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StartInstanceResponse], error) + StopInstance(ctx context.Context, in *StopInstanceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StopInstanceResponse], error) + // datastore query action (via openslides-go) + GetCollection(ctx context.Context, in *GetCollectionRequest, opts ...grpc.CallOption) (*GetCollectionResponse, error) + // server side streaming + MigrationsMigrate(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MigrationsProgressResponse], error) + MigrationsFinalize(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MigrationsProgressResponse], error) + // unary gRPC calls + MigrationsReset(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) + MigrationsClearCollectionfieldTables(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) + MigrationsStats(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) + MigrationsProgress(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) + // generic manage action via backendmanage client + SendManageAction(ctx context.Context, in *SendManageActionRequest, opts ...grpc.CallOption) (*SendManageActionResponse, error) +} + +type osmanageServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOsmanageServiceClient(cc grpc.ClientConnInterface) OsmanageServiceClient { + return &osmanageServiceClient{cc} +} + +func (c *osmanageServiceClient) SetupInstance(ctx context.Context, in *InstanceConfigRequest, opts 
...grpc.CallOption) (*InstanceConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(InstanceConfigResponse) + err := c.cc.Invoke(ctx, OsmanageService_SetupInstance_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) ConfigInstance(ctx context.Context, in *InstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(InstanceConfigResponse) + err := c.cc.Invoke(ctx, OsmanageService_ConfigInstance_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*CreateInstanceResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateInstanceResponse) + err := c.cc.Invoke(ctx, OsmanageService_CreateInstance_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) RemoveInstance(ctx context.Context, in *RemoveInstanceRequest, opts ...grpc.CallOption) (*RemoveInstanceResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RemoveInstanceResponse) + err := c.cc.Invoke(ctx, OsmanageService_RemoveInstance_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) GetNamespaceExists(ctx context.Context, in *GetNamespaceExistsRequest, opts ...grpc.CallOption) (*GetNamespaceExistsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetNamespaceExistsResponse) + err := c.cc.Invoke(ctx, OsmanageService_GetNamespaceExists_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) GetClusterStatus(ctx context.Context, in *GetClusterStatusRequest, opts ...grpc.CallOption) (*GetClusterStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetClusterStatusResponse) + err := c.cc.Invoke(ctx, OsmanageService_GetClusterStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) GetServiceAddress(ctx context.Context, in *GetServiceAddressRequest, opts ...grpc.CallOption) (*GetServiceAddressResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetServiceAddressResponse) + err := c.cc.Invoke(ctx, OsmanageService_GetServiceAddress_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) GetInstanceHealth(ctx context.Context, in *GetInstanceHealthRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetInstanceHealthResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[0], OsmanageService_GetInstanceHealth_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[GetInstanceHealthRequest, GetInstanceHealthResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_GetInstanceHealthClient = grpc.ServerStreamingClient[GetInstanceHealthResponse] + +func (c *osmanageServiceClient) GetInstanceStatus(ctx context.Context, in *GetInstanceStatusRequest, opts ...grpc.CallOption) (*GetInstanceStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetInstanceStatusResponse) + err := c.cc.Invoke(ctx, OsmanageService_GetInstanceStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) UpdateBackendmanage(ctx context.Context, in *UpdateBackendmanageRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[UpdateBackendmanageResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[1], OsmanageService_UpdateBackendmanage_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[UpdateBackendmanageRequest, UpdateBackendmanageResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_UpdateBackendmanageClient = grpc.ServerStreamingClient[UpdateBackendmanageResponse] + +func (c *osmanageServiceClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[UpdateInstanceResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[2], OsmanageService_UpdateInstance_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[UpdateInstanceRequest, UpdateInstanceResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_UpdateInstanceClient = grpc.ServerStreamingClient[UpdateInstanceResponse] + +func (c *osmanageServiceClient) ScaleService(ctx context.Context, in *ScaleServiceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ScaleServiceResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[3], OsmanageService_ScaleService_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ScaleServiceRequest, ScaleServiceResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_ScaleServiceClient = grpc.ServerStreamingClient[ScaleServiceResponse] + +func (c *osmanageServiceClient) StartInstance(ctx context.Context, in *StartInstanceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StartInstanceResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[4], OsmanageService_StartInstance_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StartInstanceRequest, StartInstanceResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_StartInstanceClient = grpc.ServerStreamingClient[StartInstanceResponse] + +func (c *osmanageServiceClient) StopInstance(ctx context.Context, in *StopInstanceRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StopInstanceResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[5], OsmanageService_StopInstance_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[StopInstanceRequest, StopInstanceResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_StopInstanceClient = grpc.ServerStreamingClient[StopInstanceResponse] + +func (c *osmanageServiceClient) GetCollection(ctx context.Context, in *GetCollectionRequest, opts ...grpc.CallOption) (*GetCollectionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetCollectionResponse) + err := c.cc.Invoke(ctx, OsmanageService_GetCollection_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) MigrationsMigrate(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MigrationsProgressResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[6], OsmanageService_MigrationsMigrate_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[MigrationsRequest, MigrationsProgressResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_MigrationsMigrateClient = grpc.ServerStreamingClient[MigrationsProgressResponse] + +func (c *osmanageServiceClient) MigrationsFinalize(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MigrationsProgressResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &OsmanageService_ServiceDesc.Streams[7], OsmanageService_MigrationsFinalize_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[MigrationsRequest, MigrationsProgressResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_MigrationsFinalizeClient = grpc.ServerStreamingClient[MigrationsProgressResponse] + +func (c *osmanageServiceClient) MigrationsReset(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MigrationsResponse) + err := c.cc.Invoke(ctx, OsmanageService_MigrationsReset_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) MigrationsClearCollectionfieldTables(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MigrationsResponse) + err := c.cc.Invoke(ctx, OsmanageService_MigrationsClearCollectionfieldTables_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) MigrationsStats(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MigrationsResponse) + err := c.cc.Invoke(ctx, OsmanageService_MigrationsStats_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) MigrationsProgress(ctx context.Context, in *MigrationsRequest, opts ...grpc.CallOption) (*MigrationsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MigrationsResponse) + err := c.cc.Invoke(ctx, OsmanageService_MigrationsProgress_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *osmanageServiceClient) SendManageAction(ctx context.Context, in *SendManageActionRequest, opts ...grpc.CallOption) (*SendManageActionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SendManageActionResponse) + err := c.cc.Invoke(ctx, OsmanageService_SendManageAction_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OsmanageServiceServer is the server API for OsmanageService service. +// All implementations must embed UnimplementedOsmanageServiceServer +// for forward compatibility. +type OsmanageServiceServer interface { + // Instance filesystem operations + SetupInstance(context.Context, *InstanceConfigRequest) (*InstanceConfigResponse, error) + ConfigInstance(context.Context, *InstanceConfigRequest) (*InstanceConfigResponse, error) + CreateInstance(context.Context, *CreateInstanceRequest) (*CreateInstanceResponse, error) + RemoveInstance(context.Context, *RemoveInstanceRequest) (*RemoveInstanceResponse, error) + // k8s actions + GetNamespaceExists(context.Context, *GetNamespaceExistsRequest) (*GetNamespaceExistsResponse, error) + GetClusterStatus(context.Context, *GetClusterStatusRequest) (*GetClusterStatusResponse, error) + GetServiceAddress(context.Context, *GetServiceAddressRequest) (*GetServiceAddressResponse, error) + GetInstanceHealth(*GetInstanceHealthRequest, grpc.ServerStreamingServer[GetInstanceHealthResponse]) error + GetInstanceStatus(context.Context, *GetInstanceStatusRequest) (*GetInstanceStatusResponse, error) + UpdateBackendmanage(*UpdateBackendmanageRequest, grpc.ServerStreamingServer[UpdateBackendmanageResponse]) error + UpdateInstance(*UpdateInstanceRequest, grpc.ServerStreamingServer[UpdateInstanceResponse]) error + ScaleService(*ScaleServiceRequest, grpc.ServerStreamingServer[ScaleServiceResponse]) error + StartInstance(*StartInstanceRequest, 
grpc.ServerStreamingServer[StartInstanceResponse]) error + StopInstance(*StopInstanceRequest, grpc.ServerStreamingServer[StopInstanceResponse]) error + // datastore query action (via openslides-go) + GetCollection(context.Context, *GetCollectionRequest) (*GetCollectionResponse, error) + // server side streaming + MigrationsMigrate(*MigrationsRequest, grpc.ServerStreamingServer[MigrationsProgressResponse]) error + MigrationsFinalize(*MigrationsRequest, grpc.ServerStreamingServer[MigrationsProgressResponse]) error + // unary gRPC calls + MigrationsReset(context.Context, *MigrationsRequest) (*MigrationsResponse, error) + MigrationsClearCollectionfieldTables(context.Context, *MigrationsRequest) (*MigrationsResponse, error) + MigrationsStats(context.Context, *MigrationsRequest) (*MigrationsResponse, error) + MigrationsProgress(context.Context, *MigrationsRequest) (*MigrationsResponse, error) + // generic manage action via backendmanage client + SendManageAction(context.Context, *SendManageActionRequest) (*SendManageActionResponse, error) + mustEmbedUnimplementedOsmanageServiceServer() +} + +// UnimplementedOsmanageServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedOsmanageServiceServer struct{} + +func (UnimplementedOsmanageServiceServer) SetupInstance(context.Context, *InstanceConfigRequest) (*InstanceConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SetupInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) ConfigInstance(context.Context, *InstanceConfigRequest) (*InstanceConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ConfigInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) CreateInstance(context.Context, *CreateInstanceRequest) (*CreateInstanceResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) RemoveInstance(context.Context, *RemoveInstanceRequest) (*RemoveInstanceResponse, error) { + return nil, status.Error(codes.Unimplemented, "method RemoveInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) GetNamespaceExists(context.Context, *GetNamespaceExistsRequest) (*GetNamespaceExistsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetNamespaceExists not implemented") +} +func (UnimplementedOsmanageServiceServer) GetClusterStatus(context.Context, *GetClusterStatusRequest) (*GetClusterStatusResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetClusterStatus not implemented") +} +func (UnimplementedOsmanageServiceServer) GetServiceAddress(context.Context, *GetServiceAddressRequest) (*GetServiceAddressResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetServiceAddress not implemented") +} +func (UnimplementedOsmanageServiceServer) GetInstanceHealth(*GetInstanceHealthRequest, grpc.ServerStreamingServer[GetInstanceHealthResponse]) error { + return status.Error(codes.Unimplemented, "method GetInstanceHealth not implemented") +} +func (UnimplementedOsmanageServiceServer) GetInstanceStatus(context.Context, 
*GetInstanceStatusRequest) (*GetInstanceStatusResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetInstanceStatus not implemented") +} +func (UnimplementedOsmanageServiceServer) UpdateBackendmanage(*UpdateBackendmanageRequest, grpc.ServerStreamingServer[UpdateBackendmanageResponse]) error { + return status.Error(codes.Unimplemented, "method UpdateBackendmanage not implemented") +} +func (UnimplementedOsmanageServiceServer) UpdateInstance(*UpdateInstanceRequest, grpc.ServerStreamingServer[UpdateInstanceResponse]) error { + return status.Error(codes.Unimplemented, "method UpdateInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) ScaleService(*ScaleServiceRequest, grpc.ServerStreamingServer[ScaleServiceResponse]) error { + return status.Error(codes.Unimplemented, "method ScaleService not implemented") +} +func (UnimplementedOsmanageServiceServer) StartInstance(*StartInstanceRequest, grpc.ServerStreamingServer[StartInstanceResponse]) error { + return status.Error(codes.Unimplemented, "method StartInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) StopInstance(*StopInstanceRequest, grpc.ServerStreamingServer[StopInstanceResponse]) error { + return status.Error(codes.Unimplemented, "method StopInstance not implemented") +} +func (UnimplementedOsmanageServiceServer) GetCollection(context.Context, *GetCollectionRequest) (*GetCollectionResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetCollection not implemented") +} +func (UnimplementedOsmanageServiceServer) MigrationsMigrate(*MigrationsRequest, grpc.ServerStreamingServer[MigrationsProgressResponse]) error { + return status.Error(codes.Unimplemented, "method MigrationsMigrate not implemented") +} +func (UnimplementedOsmanageServiceServer) MigrationsFinalize(*MigrationsRequest, grpc.ServerStreamingServer[MigrationsProgressResponse]) error { + return status.Error(codes.Unimplemented, "method MigrationsFinalize not implemented") +} 
+func (UnimplementedOsmanageServiceServer) MigrationsReset(context.Context, *MigrationsRequest) (*MigrationsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method MigrationsReset not implemented") +} +func (UnimplementedOsmanageServiceServer) MigrationsClearCollectionfieldTables(context.Context, *MigrationsRequest) (*MigrationsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method MigrationsClearCollectionfieldTables not implemented") +} +func (UnimplementedOsmanageServiceServer) MigrationsStats(context.Context, *MigrationsRequest) (*MigrationsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method MigrationsStats not implemented") +} +func (UnimplementedOsmanageServiceServer) MigrationsProgress(context.Context, *MigrationsRequest) (*MigrationsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method MigrationsProgress not implemented") +} +func (UnimplementedOsmanageServiceServer) SendManageAction(context.Context, *SendManageActionRequest) (*SendManageActionResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SendManageAction not implemented") +} +func (UnimplementedOsmanageServiceServer) mustEmbedUnimplementedOsmanageServiceServer() {} +func (UnimplementedOsmanageServiceServer) testEmbeddedByValue() {} + +// UnsafeOsmanageServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OsmanageServiceServer will +// result in compilation errors. +type UnsafeOsmanageServiceServer interface { + mustEmbedUnimplementedOsmanageServiceServer() +} + +func RegisterOsmanageServiceServer(s grpc.ServiceRegistrar, srv OsmanageServiceServer) { + // If the following call panics, it indicates UnimplementedOsmanageServiceServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&OsmanageService_ServiceDesc, srv) +} + +func _OsmanageService_SetupInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InstanceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).SetupInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_SetupInstance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).SetupInstance(ctx, req.(*InstanceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_ConfigInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InstanceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).ConfigInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_ConfigInstance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).ConfigInstance(ctx, req.(*InstanceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { 
+ return srv.(OsmanageServiceServer).CreateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_CreateInstance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).CreateInstance(ctx, req.(*CreateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_RemoveInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).RemoveInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_RemoveInstance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).RemoveInstance(ctx, req.(*RemoveInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_GetNamespaceExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNamespaceExistsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).GetNamespaceExists(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_GetNamespaceExists_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).GetNamespaceExists(ctx, req.(*GetNamespaceExistsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_GetClusterStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetClusterStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).GetClusterStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_GetClusterStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).GetClusterStatus(ctx, req.(*GetClusterStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_GetServiceAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).GetServiceAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_GetServiceAddress_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).GetServiceAddress(ctx, req.(*GetServiceAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_GetInstanceHealth_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetInstanceHealthRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).GetInstanceHealth(m, &grpc.GenericServerStream[GetInstanceHealthRequest, GetInstanceHealthResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_GetInstanceHealthServer = grpc.ServerStreamingServer[GetInstanceHealthResponse] + +func _OsmanageService_GetInstanceStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).GetInstanceStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_GetInstanceStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).GetInstanceStatus(ctx, req.(*GetInstanceStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_UpdateBackendmanage_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(UpdateBackendmanageRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).UpdateBackendmanage(m, &grpc.GenericServerStream[UpdateBackendmanageRequest, UpdateBackendmanageResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_UpdateBackendmanageServer = grpc.ServerStreamingServer[UpdateBackendmanageResponse] + +func _OsmanageService_UpdateInstance_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(UpdateInstanceRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).UpdateInstance(m, &grpc.GenericServerStream[UpdateInstanceRequest, UpdateInstanceResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_UpdateInstanceServer = grpc.ServerStreamingServer[UpdateInstanceResponse] + +func _OsmanageService_ScaleService_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ScaleServiceRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).ScaleService(m, &grpc.GenericServerStream[ScaleServiceRequest, ScaleServiceResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_ScaleServiceServer = grpc.ServerStreamingServer[ScaleServiceResponse] + +func _OsmanageService_StartInstance_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StartInstanceRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).StartInstance(m, &grpc.GenericServerStream[StartInstanceRequest, StartInstanceResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_StartInstanceServer = grpc.ServerStreamingServer[StartInstanceResponse] + +func _OsmanageService_StopInstance_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StopInstanceRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).StopInstance(m, &grpc.GenericServerStream[StopInstanceRequest, StopInstanceResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_StopInstanceServer = grpc.ServerStreamingServer[StopInstanceResponse] + +func _OsmanageService_GetCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCollectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).GetCollection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_GetCollection_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).GetCollection(ctx, req.(*GetCollectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_MigrationsMigrate_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(MigrationsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).MigrationsMigrate(m, &grpc.GenericServerStream[MigrationsRequest, MigrationsProgressResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type OsmanageService_MigrationsMigrateServer = grpc.ServerStreamingServer[MigrationsProgressResponse] + +func _OsmanageService_MigrationsFinalize_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(MigrationsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OsmanageServiceServer).MigrationsFinalize(m, &grpc.GenericServerStream[MigrationsRequest, MigrationsProgressResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type OsmanageService_MigrationsFinalizeServer = grpc.ServerStreamingServer[MigrationsProgressResponse] + +func _OsmanageService_MigrationsReset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).MigrationsReset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_MigrationsReset_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).MigrationsReset(ctx, req.(*MigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_MigrationsClearCollectionfieldTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).MigrationsClearCollectionfieldTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_MigrationsClearCollectionfieldTables_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).MigrationsClearCollectionfieldTables(ctx, req.(*MigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_MigrationsStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).MigrationsStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
OsmanageService_MigrationsStats_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).MigrationsStats(ctx, req.(*MigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_MigrationsProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).MigrationsProgress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_MigrationsProgress_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).MigrationsProgress(ctx, req.(*MigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OsmanageService_SendManageAction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendManageActionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OsmanageServiceServer).SendManageAction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: OsmanageService_SendManageAction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OsmanageServiceServer).SendManageAction(ctx, req.(*SendManageActionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OsmanageService_ServiceDesc is the grpc.ServiceDesc for OsmanageService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OsmanageService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "osmanage.OsmanageService", + HandlerType: (*OsmanageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetupInstance", + Handler: _OsmanageService_SetupInstance_Handler, + }, + { + MethodName: "ConfigInstance", + Handler: _OsmanageService_ConfigInstance_Handler, + }, + { + MethodName: "CreateInstance", + Handler: _OsmanageService_CreateInstance_Handler, + }, + { + MethodName: "RemoveInstance", + Handler: _OsmanageService_RemoveInstance_Handler, + }, + { + MethodName: "GetNamespaceExists", + Handler: _OsmanageService_GetNamespaceExists_Handler, + }, + { + MethodName: "GetClusterStatus", + Handler: _OsmanageService_GetClusterStatus_Handler, + }, + { + MethodName: "GetServiceAddress", + Handler: _OsmanageService_GetServiceAddress_Handler, + }, + { + MethodName: "GetInstanceStatus", + Handler: _OsmanageService_GetInstanceStatus_Handler, + }, + { + MethodName: "GetCollection", + Handler: _OsmanageService_GetCollection_Handler, + }, + { + MethodName: "MigrationsReset", + Handler: _OsmanageService_MigrationsReset_Handler, + }, + { + MethodName: "MigrationsClearCollectionfieldTables", + Handler: _OsmanageService_MigrationsClearCollectionfieldTables_Handler, + }, + { + MethodName: "MigrationsStats", + Handler: _OsmanageService_MigrationsStats_Handler, + }, + { + MethodName: "MigrationsProgress", + Handler: _OsmanageService_MigrationsProgress_Handler, + }, + { + MethodName: "SendManageAction", + Handler: _OsmanageService_SendManageAction_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetInstanceHealth", + Handler: _OsmanageService_GetInstanceHealth_Handler, + ServerStreams: true, + }, + { + StreamName: "UpdateBackendmanage", + Handler: _OsmanageService_UpdateBackendmanage_Handler, + ServerStreams: true, + }, + { + StreamName: "UpdateInstance", + 
Handler: _OsmanageService_UpdateInstance_Handler, + ServerStreams: true, + }, + { + StreamName: "ScaleService", + Handler: _OsmanageService_ScaleService_Handler, + ServerStreams: true, + }, + { + StreamName: "StartInstance", + Handler: _OsmanageService_StartInstance_Handler, + ServerStreams: true, + }, + { + StreamName: "StopInstance", + Handler: _OsmanageService_StopInstance_Handler, + ServerStreams: true, + }, + { + StreamName: "MigrationsMigrate", + Handler: _OsmanageService_MigrationsMigrate_Handler, + ServerStreams: true, + }, + { + StreamName: "MigrationsFinalize", + Handler: _OsmanageService_MigrationsFinalize_Handler, + ServerStreams: true, + }, + }, + Metadata: "proto/osmanage.proto", +}