Skip to content

Commit 4428545

Browse files
authored
Merge pull request #18149 from hakman/azure-terraform
azure: Add experimental Terraform support
2 parents b05ad3d + fa42a8c commit 4428545

48 files changed

Lines changed: 5314 additions & 23 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

cmd/kops/integration_test.go

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -323,6 +323,13 @@ func TestMinimal_v1_34(t *testing.T) {
323323
runTestTerraformAWS(t)
324324
}
325325

326+
// TestMinimalAzure runs the test on a minimum Azure configuration.
327+
func TestMinimalAzure(t *testing.T) {
328+
newIntegrationTest("minimal-azure.example.com", "minimal_azure").
329+
withVersion("v1alpha3").
330+
runTestTerraformAzure(t)
331+
}
332+
326333
// TestMinimal_NoneDNS runs the test on a minimum configuration with --dns=none
327334
func TestMinimal_NoneDNS(t *testing.T) {
328335
newIntegrationTest("minimal.example.com", "minimal-dns-none").
@@ -1736,6 +1743,111 @@ func (i *integrationTest) runTestTerraformGCE(t *testing.T) {
17361743
i.runTest(t, ctx, h, expectedFilenames, "", "", nil)
17371744
}
17381745

1746+
func (i *integrationTest) runTestTerraformAzure(t *testing.T) {
1747+
t.Setenv("AZURE_STORAGE_ACCOUNT", "teststorage")
1748+
t.Setenv("KOPS_RUN_TOO_NEW_VERSION", "1")
1749+
1750+
featureflag.ParseFlags("+Azure,+AzureTerraform")
1751+
defer featureflag.ParseFlags("-Azure,-AzureTerraform")
1752+
1753+
ctx := testcontext.ForTest(t)
1754+
h := testutils.NewIntegrationTestHarness(t)
1755+
defer h.Close()
1756+
1757+
h.MockKopsVersion("1.34.0-beta.1")
1758+
h.SetupMockAzure()
1759+
1760+
var stdout bytes.Buffer
1761+
1762+
i.srcDir = updateClusterTestBase + i.srcDir
1763+
inputYAML := "in-" + i.version + ".yaml"
1764+
1765+
factory := i.setupCluster(t, ctx, inputYAML, stdout)
1766+
1767+
options := &UpdateClusterOptions{}
1768+
options.InitDefaults()
1769+
options.Target = "terraform"
1770+
options.OutDir = path.Join(h.TempDir, "out")
1771+
options.RunTasksOptions.MaxTaskDuration = 30 * time.Second
1772+
options.CreateKubecfg = false
1773+
options.ClusterName = i.clusterName
1774+
1775+
updateClusterResults, err := RunUpdateCluster(ctx, factory, &stdout, options)
1776+
if err != nil {
1777+
t.Fatalf("error running update cluster %q: %v", i.clusterName, err)
1778+
}
1779+
1780+
for key, task := range updateClusterResults.TaskMap {
1781+
if _, err := json.Marshal(task); err != nil {
1782+
t.Errorf("unable to marshal task %q of type %T to json: %v", key, task, err)
1783+
}
1784+
}
1785+
1786+
files, err := os.ReadDir(path.Join(h.TempDir, "out"))
1787+
if err != nil {
1788+
t.Fatalf("failed to read dir: %v", err)
1789+
}
1790+
1791+
var fileNames []string
1792+
for _, f := range files {
1793+
fileNames = append(fileNames, f.Name())
1794+
}
1795+
sort.Strings(fileNames)
1796+
if actual, expected := strings.Join(fileNames, ","), "data,kubernetes.tf"; actual != expected {
1797+
t.Fatalf("unexpected files. actual=%q, expected=%q, test=%q", actual, expected, "kubernetes.tf")
1798+
}
1799+
1800+
actualTF, err := os.ReadFile(path.Join(h.TempDir, "out", "kubernetes.tf"))
1801+
if err != nil {
1802+
t.Fatalf("unexpected error reading actual terraform output: %v", err)
1803+
}
1804+
golden.AssertMatchesFile(t, string(actualTF), path.Join(i.srcDir, "kubernetes.tf"))
1805+
1806+
actualDataDir := filepath.Join(h.TempDir, "out", "data")
1807+
actualDataFiles, err := os.ReadDir(actualDataDir)
1808+
if err != nil {
1809+
t.Fatalf("failed to read data dir %q: %v", actualDataDir, err)
1810+
}
1811+
1812+
var actualDataFilenames []string
1813+
for _, f := range actualDataFiles {
1814+
actualDataFilenames = append(actualDataFilenames, f.Name())
1815+
}
1816+
sort.Strings(actualDataFilenames)
1817+
1818+
expectedDataDir := filepath.Join(i.srcDir, "data")
1819+
expectedDataFilenames := actualDataFilenames
1820+
if !golden.UpdateExpectedOutput() {
1821+
expectedDataFiles, err := os.ReadDir(expectedDataDir)
1822+
if err != nil {
1823+
t.Fatalf("failed to read data dir %q: %v", expectedDataDir, err)
1824+
}
1825+
expectedDataFilenames = make([]string, 0, len(expectedDataFiles))
1826+
for _, f := range expectedDataFiles {
1827+
expectedDataFilenames = append(expectedDataFilenames, f.Name())
1828+
}
1829+
sort.Strings(expectedDataFilenames)
1830+
}
1831+
1832+
for _, filename := range expectedDataFilenames {
1833+
expectedPath := filepath.Join(expectedDataDir, filename)
1834+
actualPath := filepath.Join(actualDataDir, filename)
1835+
actualDataContent, err := os.ReadFile(actualPath)
1836+
if err != nil {
1837+
t.Errorf("failed to read actual data file %q: %v", actualPath, err)
1838+
continue
1839+
}
1840+
golden.AssertMatchesFile(t, string(actualDataContent), expectedPath)
1841+
}
1842+
1843+
if !reflect.DeepEqual(actualDataFilenames, expectedDataFilenames) {
1844+
actual := strings.Join(actualDataFilenames, "\n")
1845+
expected := strings.Join(expectedDataFilenames, "\n")
1846+
t.Log(diff.FormatDiff(actual, expected))
1847+
t.Error("unexpected data files.")
1848+
}
1849+
}
1850+
17391851
func (i *integrationTest) runTestTerraformHetzner(t *testing.T) {
17401852
t.Setenv("KOPS_RUN_TOO_NEW_VERSION", "1")
17411853

docs/getting_started/azure.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
* Create, update and delete clusters
88
* Create, edit and delete instance groups
9+
* Generate Terraform configuration with `--target=terraform` (experimental, requires `AzureTerraform` feature flag)
910
* ...
1011

1112
## Requirements
@@ -83,7 +84,6 @@ kOps for Azure currently does not support the following features:
8384
* Azure Disk volumes
8485
* Azure Load Balancer
8586
* Autoscaling (using Cluster Autoscaler or Karpenter)
86-
* Terraform support
8787
* Multi-master clusters
8888
* ...
8989

pkg/featureflag/featureflag.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,8 @@ var (
8080
ClusterAddons = new("ClusterAddons", Bool(false))
8181
// Azure toggles the Azure support.
8282
Azure = new("Azure", Bool(false))
83+
// AzureTerraform toggles the Azure terraform support.
84+
AzureTerraform = new("AzureTerraform", Bool(false))
8385
// APIServerNodes enables ability to provision nodes that only run the kube-apiserver.
8486
APIServerNodes = new("APIServerNodes", Bool(false))
8587
// UseAddonOperators activates experimental addon operator support

pkg/model/azuremodel/network.go

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -285,6 +285,15 @@ func (b *NetworkModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
285285
}
286286
c.AddTask(ngwTask)
287287

288+
rtTask := &azuretasks.RouteTable{
289+
Name: fi.PtrTo(b.NameForRouteTable()),
290+
Lifecycle: b.Lifecycle,
291+
ResourceGroup: b.LinkToResourceGroup(),
292+
Tags: map[string]*string{},
293+
Shared: fi.PtrTo(b.Cluster.IsSharedAzureRouteTable()),
294+
}
295+
c.AddTask(rtTask)
296+
288297
for _, subnetSpec := range b.Cluster.Spec.Networking.Subnets {
289298
subnetTask := &azuretasks.Subnet{
290299
Name: fi.PtrTo(subnetSpec.Name),
@@ -293,21 +302,13 @@ func (b *NetworkModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
293302
VirtualNetwork: b.LinkToVirtualNetwork(),
294303
NatGateway: ngwTask,
295304
NetworkSecurityGroup: nsgTask,
305+
RouteTable: rtTask,
296306
CIDR: fi.PtrTo(subnetSpec.CIDR),
297307
Shared: fi.PtrTo(b.Cluster.SharedVPC()),
298308
}
299309
c.AddTask(subnetTask)
300310
}
301311

302-
rtTask := &azuretasks.RouteTable{
303-
Name: fi.PtrTo(b.NameForRouteTable()),
304-
Lifecycle: b.Lifecycle,
305-
ResourceGroup: b.LinkToResourceGroup(),
306-
Tags: map[string]*string{},
307-
Shared: fi.PtrTo(b.Cluster.IsSharedAzureRouteTable()),
308-
}
309-
c.AddTask(rtTask)
310-
311312
return nil
312313
}
313314

pkg/testutils/integrationtestharness.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ import (
5959
"k8s.io/kops/pkg/pki"
6060
"k8s.io/kops/upup/pkg/fi"
6161
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
62+
"k8s.io/kops/upup/pkg/fi/cloudup/azuretasks"
6263
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
6364
"k8s.io/kops/util/pkg/vfs"
6465
)
@@ -133,6 +134,10 @@ func (h *IntegrationTestHarness) Close() {
133134
}
134135
}
135136

137+
func (h *IntegrationTestHarness) SetupMockAzure() *azuretasks.MockAzureCloud {
138+
return azuretasks.InstallMockAzureCloud("eastus", "sub-123", "minimal-azure.example.com")
139+
}
140+
136141
func (h *IntegrationTestHarness) SetupMockAWS() *awsup.MockAWSCloud {
137142
ctx := context.TODO()
138143
cloud := awsup.InstallMockAWSCloud("us-test-1", "abc")
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
#!/bin/bash
# nodeup bootstrap script for an Azure instance
# (golden output for the minimal-azure integration test).
set -o errexit
set -o nounset
set -o pipefail

# nodeup download locations and SHA-256 hashes per architecture.
# Each URL value is a comma-separated list of mirrors.
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=c86e072f622b91546b7b3f3cb1a0f8a131e48b966ad018a0ac1520ceedf37725
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=64a9a9510538a449e85d05e13e3cd98b80377d68a673447c26821d40f00f0075

# Storage account for the Azure blob state store.
export AZURE_STORAGE_ACCOUNT=teststorage




# Raise kernel network buffer limits; best-effort, so failures are ignored.
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
20+
21+
22+
# ensure-install-dir: choose a writable, executable install root, create its
# bin/ and conf/ subdirectories, and cd into it.
function ensure-install-dir() {
  INSTALL_DIR="/opt/kops"
  # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
  if [[ -d /var/lib/toolbox ]]; then
    INSTALL_DIR="/var/lib/toolbox/kops"
  fi
  mkdir -p "${INSTALL_DIR}/bin" "${INSTALL_DIR}/conf"
  cd "${INSTALL_DIR}"
}
32+
33+
# Retry a download until we get it. args: name, sha, urls
# Tries every mirror with several download tools, verifying the SHA-256 after
# each attempt; loops forever (sleeping between rounds) until one succeeds.
download-or-bust() {
  echo "== Downloading $1 with hash $2 from $3 =="
  local -r file="$1"
  local -r hash="$2"
  local -a urls
  # $3 is a comma-separated mirror list; split it into an array.
  IFS=, read -r -a urls <<< "$3"

  # Reuse a previously downloaded file if its hash checks out;
  # otherwise discard it and download again.
  if [[ -f "${file}" ]]; then
    if ! validate-hash "${file}" "${hash}"; then
      rm -f "${file}"
    else
      return 0
    fi
  fi

  while true; do
    for url in "${urls[@]}"; do
      # Preferred tools first (compressed transfer), then plain fallbacks for
      # older curl/wget versions.
      commands=(
        "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
        "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
        "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
      )
      for cmd in "${commands[@]}"; do
        echo "== Downloading ${url} using ${cmd} =="
        # Run in a subshell so a tool that is not installed doesn't kill the
        # script under errexit.
        if ! (${cmd} "${url}"); then
          echo "== Failed to download ${url} using ${cmd} =="
          continue
        fi
        # A download only counts if the hash verifies.
        if ! validate-hash "${file}" "${hash}"; then
          echo "== Failed to validate hash for ${url} =="
          rm -f "${file}"
        else
          echo "== Downloaded ${url} with hash ${hash} =="
          return 0
        fi
      done
    done

    echo "== All downloads failed; sleeping before retrying =="
    sleep 60
  done
}
77+
78+
# validate-hash: succeed iff file $1's SHA-256 digest equals $2.
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual

  actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
  if [[ "${actual}" == "${expected}" ]]; then
    return 0
  fi
  echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
  return 1
}
89+
90+
# download-release: pick the nodeup build for this machine's architecture,
# download and verify it into ${INSTALL_DIR}/bin, then run it to install the
# systemd unit. Sets the NODEUP_URL / NODEUP_HASH globals as a side effect.
function download-release() {
  case "$(uname -m)" in
  x86_64*|i?86_64*|amd64*)
    NODEUP_URL="${NODEUP_URL_AMD64}"
    NODEUP_HASH="${NODEUP_HASH_AMD64}"
    ;;
  aarch64*|arm64*)
    NODEUP_URL="${NODEUP_URL_ARM64}"
    NODEUP_HASH="${NODEUP_HASH_ARM64}"
    ;;
  *)
    echo "Unsupported host arch: $(uname -m)" >&2
    exit 1
    ;;
  esac

  cd ${INSTALL_DIR}/bin
  download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"

  chmod +x nodeup

  echo "== Running nodeup =="
  # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
  ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
115+
116+
####################################################################################

# Main flow: initialize machine-id (best-effort), prepare the install
# directory, write the nodeup bootstrap config, then download and run nodeup.
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="

echo "== nodeup node config starting =="
ensure-install-dir

# Bootstrap environment consumed by nodeup; quoted heredoc delimiter prevents
# any shell expansion of the YAML content.
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: azure
ClusterName: minimal-azure.example.com
ConfigBase: memfs://tests/minimal-azure.example.com
InstanceGroupName: control-plane-eastus-1
InstanceGroupRole: ControlPlane
NodeupConfigHash: gcFFJQgFVVQBfA4OkUuHIEgG9bNZ02Sp+LIMsgJ/UEY=

__EOF_KUBE_ENV

download-release
echo "== nodeup node config done =="

0 commit comments

Comments
 (0)