remove terraform

This commit is contained in:
DeveloperDurp 2026-02-02 05:18:06 -06:00
parent 5e39fb1a1d
commit 6912ca701a
163 changed files with 0 additions and 9597 deletions

View file

@ -1,115 +0,0 @@
resource "proxmox_vm_qemu" "k3smaster" {
count = local.k3smaster.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3smaster.ip[count.index]}"
name = local.k3smaster.name[count.index]
target_node = local.k3smaster.node[count.index]
clone = local.template
tags = local.k3smaster.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3smaster.cores
sockets = 1
cpu_type = "host"
memory = local.k3smaster.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.k3smaster.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3smaster.drive
format = local.format
storage = local.k3smaster.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3smaster.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "k3sserver" {
count = local.k3sserver.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3sserver.ip[count.index]}"
name = local.k3sserver.name[count.index]
target_node = local.k3sserver.node[count.index]
clone = local.template
tags = local.k3sserver.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3sserver.cores
sockets = 1
cpu_type = "host"
memory = local.k3sserver.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.k3sserver.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3sserver.drive
format = local.format
storage = local.k3sserver.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3sserver.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View file

@ -1,48 +0,0 @@
# Terraform settings: state lives in an HTTP backend (address supplied at
# `terraform init` time) using the Telmate Proxmox provider, pinned to a
# release-candidate build.
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc9"
}
}
}
# Proxmox API connection; credentials come from input variables.
# pm_parallel = 1 serializes API calls; pm_tls_insecure skips cert checks
# (self-signed Proxmox cert, presumably — confirm before hardening).
provider "proxmox" {
pm_parallel = 1
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
# Shared values for the dev cluster (VLAN 10). The k3smaster/k3sserver maps
# drive the count-indexed VM resources: name/node/ip lists must stay the same
# length as `count`.
locals {
# Public key injected into every VM via cloud-init (ansible access).
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEphzWgwUZnvL6E5luKLt3WO0HK7Kh63arSMoNl5gmjzXyhG1DDW0OKfoIl0T+JZw/ZjQ7iii6tmSLFRk6nuYCldqe5GVcFxvTzX4/xGEioAyG0IiUGKy6s+9xzO8QXF0EtSNPH0nfHNKcCjgwWAzM+Lt6gW0Vqs+aU5ICuDiEchmvYPz+rBaVldJVTG7m3ogKJ2aIF7HU/pCPp5l0E9gMOw7s0ABijuc3KXLEWCYgL39jIST6pFH9ceRLmu8Xy5zXHAkkEEauY/e6ld0hlzLadiUD7zYJMdDcm0oRvenYcUlaUl9gS0569IpfsJsjCejuqOxCKzTHPJDOT0f9TbIqPXkGq3s9oEJGpQW+Z8g41BqRpjBCdBk+yv39bzKxlwlumDwqgx1WP8xxKavAWYNqNRG7sBhoWwtxYEOhKXoLNjBaeDRnO5OY5AQJvONWpuByyz0R/gTh4bOFVD+Y8WWlKbT4zfhnN70XvapRsbZiaGhJBPwByAMGg6XxSbC6xtbyligVGCEjCXbTLkeKq1w0DuItY+FBGO3J2k90OiciTVSeyiVz9J/Y03UB0gHdsMCoVNrj+9QWfrTLDhM7D5YrXUt5nj2LQTcbtf49zoQXWxUhozlg42E/FJU/Yla7y55qWizAEVyP2/Ks/PHrF679k59HNd2IJ/aicA9QnmWtLQ== ansible"
template = "Debian12-Template"
format = "raw"
dnsserver = "192.168.10.1"
vlan = 10
# Control-plane nodes: one per hypervisor.
k3smaster = {
tags = "k3s_dev"
count = 3
name = ["master01-dev", "master02-dev", "master03-dev"]
cores = 2
memory = "4096"
drive = 20
storage = "cache-domains"
node = ["mothership", "overlord", "vanguard"]
ip = ["11", "12", "13"]
}
# Worker nodes: bigger CPU/memory/disk footprint.
k3sserver = {
tags = "k3s_dev"
count = 3
name = ["node01-dev", "node02-dev", "node03-dev"]
cores = 4
memory = "8192"
drive = 120
storage = "cache-domains"
node = ["mothership", "overlord", "vanguard"]
ip = ["21", "22", "23"]
}
}

View file

@ -1,14 +0,0 @@
variable "pm_api_url" {
description = "API URL to Proxmox provider"
type = string
}
variable "pm_password" {
description = "Passowrd to Proxmox provider"
type = string
}
variable "pm_user" {
description = "Username to Proxmox provider"
type = string
}

View file

@ -1,115 +0,0 @@
resource "proxmox_vm_qemu" "k3smaster" {
count = local.k3smaster.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3smaster.ip[count.index]}"
name = local.k3smaster.name[count.index]
target_node = local.k3smaster.node[count.index]
clone = local.template
tags = local.k3smaster.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3smaster.cores
sockets = 1
cpu_type = "host"
memory = local.k3smaster.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3smaster.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3smaster.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "k3sserver" {
count = local.k3sserver.count
ciuser = "administrator"
vmid = "${local.vlan}${local.k3sserver.ip[count.index]}"
name = local.k3sserver.name[count.index]
target_node = local.k3sserver.node[count.index]
clone = local.template
tags = local.k3sserver.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.k3sserver.cores
sockets = 1
cpu_type = "host"
memory = local.k3sserver.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3sserver.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3sserver.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View file

@ -1,58 +0,0 @@
# Terraform settings for the DMZ workspace: HTTP state backend, Telmate
# Proxmox provider pinned to a release-candidate build.
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc9"
}
}
}
# Proxmox API connection; credentials come from input variables.
provider "proxmox" {
pm_parallel = 1
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
locals {
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEphzWgwUZnvL6E5luKLt3WO0HK7Kh63arSMoNl5gmjzXyhG1DDW0OKfoIl0T+JZw/ZjQ7iii6tmSLFRk6nuYCldqe5GVcFxvTzX4/xGEioAyG0IiUGKy6s+9xzO8QXF0EtSNPH0nfHNKcCjgwWAzM+Lt6gW0Vqs+aU5ICuDiEchmvYPz+rBaVldJVTG7m3ogKJ2aIF7HU/pCPp5l0E9gMOw7s0ABijuc3KXLEWCYgL39jIST6pFH9ceRLmu8Xy5zXHAkkEEauY/e6ld0hlzLadiUD7zYJMdDcm0oRvenYcUlaUl9gS0569IpfsJsjCejuqOxCKzTHPJDOT0f9TbIqPXkGq3s9oEJGpQW+Z8g41BqRpjBCdBk+yv39bzKxlwlumDwqgx1WP8xxKavAWYNqNRG7sBhoWwtxYEOhKXoLNjBaeDRnO5OY5AQJvONWpuByyz0R/gTh4bOFVD+Y8WWlKbT4zfhnN70XvapRsbZiaGhJBPwByAMGg6XxSbC6xtbyligVGCEjCXbTLkeKq1w0DuItY+FBGO3J2k90OiciTVSeyiVz9J/Y03UB0gHdsMCoVNrj+9QWfrTLDhM7D5YrXUt5nj2LQTcbtf49zoQXWxUhozlg42E/FJU/Yla7y55qWizAEVyP2/Ks/PHrF679k59HNd2IJ/aicA9QnmWtLQ== ansible"
template = "Debian12-Template"
storage = "cache-domains"
emulatessd = true
format = "raw"
dnsserver = "192.168.98.1"
vlan = 98
k3smaster = {
tags = "k3s_dmz"
count = 3
name = ["master01-dmz", "master02-dmz", "master03-dmz"]
cores = 2
memory = "4096"
drive = 20
node = ["mothership", "overlord", "vanguard"]
ip = ["11", "12", "13"]
}
k3sserver = {
tags = "k3s_dmz"
count = 3
name = ["node01-dmz", "node02-dmz", "node03-dmz"]
cores = 4
memory = "8192"
drive = 240
node = ["mothership", "overlord", "vanguard"]
ip = ["21", "22", "23"]
}
openVPN = {
tags = "openVPN"
count = 1
name = ["openVPN"]
cores = 2
memory = "4096"
drive = 20
node = ["mothership"]
ip = ["20"]
}
}

View file

@ -1,57 +0,0 @@
resource "proxmox_vm_qemu" "openVPN" {
count = local.openVPN.count
ciuser = "administrator"
vmid = "${local.vlan}${local.openVPN.ip[count.index]}"
name = local.openVPN.name[count.index]
target_node = local.openVPN.node[count.index]
clone = local.template
tags = local.openVPN.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.openVPN.cores
sockets = 1
cpu_type = "host"
memory = local.openVPN.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.openVPN.drive
format = local.format
storage = local.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.openVPN.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View file

@ -1,14 +0,0 @@
variable "pm_api_url" {
description = "API URL to Proxmox provider"
type = string
}
variable "pm_password" {
description = "Passowrd to Proxmox provider"
type = string
}
variable "pm_user" {
description = "UIsername to Proxmox provider"
type = string
}

View file

@ -1,116 +0,0 @@
resource "proxmox_vm_qemu" "k3smaster" {
lifecycle {
prevent_destroy = true
}
count = local.k3smaster.count
ciuser = "administrator"
name = local.k3smaster.name[count.index]
target_node = local.k3smaster.node[count.index]
tags = local.k3smaster.tags
full_clone = false
qemu_os = "l26"
os_type = "cloud-init"
agent = 1
cores = local.k3smaster.cores
sockets = 1
cpu_type = "host"
memory = local.k3smaster.memory
scsihw = "virtio-scsi-pci"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.k3smaster.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3smaster.drive
format = local.format
storage = local.k3smaster.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3smaster.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}
resource "proxmox_vm_qemu" "k3sserver" {
lifecycle {
prevent_destroy = true
}
count = local.k3sserver.count
ciuser = "administrator"
name = local.k3sserver.name[count.index]
target_node = local.k3sserver.node[count.index]
tags = local.k3sserver.tags
qemu_os = "l26"
full_clone = false
os_type = "cloud-init"
agent = 1
cores = local.k3sserver.cores
sockets = 1
cpu_type = "host"
memory = local.k3sserver.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.k3sserver.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.k3sserver.drive
format = local.format
storage = local.k3sserver.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.k3sserver.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View file

@ -1,81 +0,0 @@
# Terraform settings for the infra workspace: HTTP state backend, Telmate
# Proxmox provider pinned to a release-candidate build.
terraform {
backend "http" {}
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc9"
}
}
}
# Proxmox API connection; credentials come from input variables.
provider "proxmox" {
pm_parallel = 1
pm_tls_insecure = true
pm_api_url = var.pm_api_url
pm_user = var.pm_user
pm_password = var.pm_password
pm_debug = false
}
locals {
sshkeys = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEphzWgwUZnvL6E5luKLt3WO0HK7Kh63arSMoNl5gmjzXyhG1DDW0OKfoIl0T+JZw/ZjQ7iii6tmSLFRk6nuYCldqe5GVcFxvTzX4/xGEioAyG0IiUGKy6s+9xzO8QXF0EtSNPH0nfHNKcCjgwWAzM+Lt6gW0Vqs+aU5ICuDiEchmvYPz+rBaVldJVTG7m3ogKJ2aIF7HU/pCPp5l0E9gMOw7s0ABijuc3KXLEWCYgL39jIST6pFH9ceRLmu8Xy5zXHAkkEEauY/e6ld0hlzLadiUD7zYJMdDcm0oRvenYcUlaUl9gS0569IpfsJsjCejuqOxCKzTHPJDOT0f9TbIqPXkGq3s9oEJGpQW+Z8g41BqRpjBCdBk+yv39bzKxlwlumDwqgx1WP8xxKavAWYNqNRG7sBhoWwtxYEOhKXoLNjBaeDRnO5OY5AQJvONWpuByyz0R/gTh4bOFVD+Y8WWlKbT4zfhnN70XvapRsbZiaGhJBPwByAMGg6XxSbC6xtbyligVGCEjCXbTLkeKq1w0DuItY+FBGO3J2k90OiciTVSeyiVz9J/Y03UB0gHdsMCoVNrj+9QWfrTLDhM7D5YrXUt5nj2LQTcbtf49zoQXWxUhozlg42E/FJU/Yla7y55qWizAEVyP2/Ks/PHrF679k59HNd2IJ/aicA9QnmWtLQ== ansible"
template = "Debian12-Template"
format = "raw"
dnsserver = "192.168.12.1"
vlan = 12
k3smaster = {
tags = "k3s_infra"
count = 3
name = ["master01-infra", "master02-infra", "master03-infra"]
cores = 2
memory = "4096"
drive = 20
storage = "cache-domains"
node = ["mothership", "overlord", "vanguard"]
ip = ["11", "12", "13"]
}
k3sserver = {
tags = "k3s_infra"
count = 3
name = ["node01-infra", "node02-infra", "node03-infra"]
cores = 4
memory = "16384"
drive = 240
storage = "cache-domains"
node = ["mothership", "overlord", "vanguard"]
ip = ["21", "22", "23"]
}
#haproxy = {
# tags = "haproxy"
# count = 3
# name = ["haproxy-01", "haproxy-02", "haproxy-03"]
# cores = 2
# memory = "1024"
# drive = 20
# storage = "cache-domains"
# node = ["mothership", "overlord", "vanguard"]
# ip = ["31", "32", "33"]
#}
#postgres = {
# tags = "postgres"
# count = 3
# name = ["postgres-01", "postgres-02", "postgres-03"]
# cores = 4
# memory = "4096"
# drive = 40
# storage = "cache-domains"
# node = ["mothership", "overlord", "vanguard"]
# ip = ["34", "35", "36"]
#}
pihole = {
tags = "pihole"
count = 3
name = ["pihole-01", "pihole-02", "pihole-03"]
cores = 2
memory = "2048"
drive = 20
storage = "cache-domains"
node = ["mothership", "overlord", "vanguard"]
ip = ["41", "42", "43"]
}
}

View file

@ -1,57 +0,0 @@
resource "proxmox_vm_qemu" "pihole" {
count = local.pihole.count
ciuser = "administrator"
vmid = "${local.vlan}${local.pihole.ip[count.index]}"
name = local.pihole.name[count.index]
target_node = local.pihole.node[count.index]
clone = local.template
tags = local.pihole.tags
qemu_os = "l26"
full_clone = true
os_type = "cloud-init"
agent = 1
cores = local.pihole.cores
sockets = 1
cpu_type = "host"
memory = local.pihole.memory
scsihw = "virtio-scsi-pci"
#bootdisk = "scsi0"
boot = "order=virtio0"
onboot = true
sshkeys = local.sshkeys
vga {
type = "serial0"
}
serial {
id = 0
type = "socket"
}
disks {
ide {
ide2 {
cloudinit {
storage = local.pihole.storage
}
}
}
virtio {
virtio0 {
disk {
size = local.pihole.drive
format = local.format
storage = local.pihole.storage
}
}
}
}
network {
id = 0
model = "virtio"
bridge = "vmbr0"
tag = local.vlan
}
#Cloud Init Settings
ipconfig0 = "ip=192.168.${local.vlan}.${local.pihole.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
searchdomain = "durp.loc"
nameserver = local.dnsserver
}

View file

@ -1,116 +0,0 @@
#resource "proxmox_vm_qemu" "haproxy" {
# count = local.haproxy.count
# ciuser = "administrator"
# vmid = "${local.vlan}${local.haproxy.ip[count.index]}"
# name = local.haproxy.name[count.index]
# target_node = local.haproxy.node[count.index]
# clone = local.template
# tags = local.haproxy.tags
# qemu_os = "l26"
# full_clone = true
# os_type = "cloud-init"
# agent = 1
# cores = local.haproxy.cores
# sockets = 1
# cpu_type = "host"
# memory = local.haproxy.memory
# scsihw = "virtio-scsi-pci"
# #bootdisk = "scsi0"
# boot = "order=virtio0"
# onboot = true
# sshkeys = local.sshkeys
# vga {
# type = "serial0"
# }
# serial {
# id = 0
# type = "socket"
# }
# disks {
# ide {
# ide2 {
# cloudinit {
# storage = local.haproxy.storage
# }
# }
# }
# virtio {
# virtio0 {
# disk {
# size = local.haproxy.drive
# format = local.format
# storage = local.haproxy.storage
# }
# }
# }
# }
# network {
# id = 0
# model = "virtio"
# bridge = "vmbr0"
# tag = local.vlan
# }
# #Cloud Init Settings
# ipconfig0 = "ip=192.168.${local.vlan}.${local.haproxy.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
# searchdomain = "durp.loc"
# nameserver = local.dnsserver
#}
#
#resource "proxmox_vm_qemu" "postgres" {
# count = local.postgres.count
# ciuser = "administrator"
# vmid = "${local.vlan}${local.postgres.ip[count.index]}"
# name = local.postgres.name[count.index]
# target_node = local.postgres.node[count.index]
# clone = local.template
# tags = local.postgres.tags
# qemu_os = "l26"
# full_clone = true
# os_type = "cloud-init"
# agent = 1
# cores = local.postgres.cores
# sockets = 1
# cpu_type = "host"
# memory = local.postgres.memory
# scsihw = "virtio-scsi-pci"
# #bootdisk = "scsi0"
# boot = "order=virtio0"
# onboot = true
# sshkeys = local.sshkeys
# vga {
# type = "serial0"
# }
# serial {
# id = 0
# type = "socket"
# }
# disks {
# ide {
# ide2 {
# cloudinit {
# storage = local.postgres.storage
# }
# }
# }
# virtio {
# virtio0 {
# disk {
# size = local.postgres.drive
# format = local.format
# storage = local.postgres.storage
# }
# }
# }
# }
# network {
# id = 0
# model = "virtio"
# bridge = "vmbr0"
# tag = local.vlan
# }
# #Cloud Init Settings
# ipconfig0 = "ip=192.168.${local.vlan}.${local.postgres.ip[count.index]}/24,gw=192.168.${local.vlan}.1"
# searchdomain = "durp.loc"
# nameserver = local.dnsserver
#}
#

View file

@ -1,14 +0,0 @@
variable "pm_api_url" {
description = "API URL to Proxmox provider"
type = string
}
variable "pm_password" {
description = "Passowrd to Proxmox provider"
type = string
}
variable "pm_user" {
description = "UIsername to Proxmox provider"
type = string
}

View file

@ -1,14 +0,0 @@
# Wrapper Helm chart that pulls in the upstream argo-cd chart as a dependency
# (values supplied under the `argo-cd:` key in values.yaml).
apiVersion: v2
name: argocd
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 8.1.3

View file

@ -1,23 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: internalproxy
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/internalproxy
# directory:
# recurse: true
# destination:
# server: https://kubernetes.default.svc
# namespace: internalproxy
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#

View file

@ -1,59 +0,0 @@
# ArgoCD manages its own deployment (app-of-itself) from the homelab repo,
# plus a Traefik route and a Let's Encrypt certificate for its UI.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/argocd
destination:
namespace: argocd
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
# HTTPS route to the argocd-server service; access restricted by the
# `whitelist` middleware defined in the traefik namespace.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: argocd-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
entryPoints:
- websecure
routes:
- match: Host(`argocd.internal.durp.info`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: argocd-server
port: 443
# Backend itself serves TLS, so Traefik re-encrypts to it.
scheme: https
tls:
secretName: argocd-tls
---
# cert-manager certificate backing the route's TLS secret.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: argocd-tls
spec:
secretName: argocd-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "argocd.internal.durp.info"
dnsNames:
- "argocd.internal.durp.info"

View file

@ -1,21 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: authentik
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/authentik
# destination:
# namespace: authentik
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#

View file

@ -1,23 +0,0 @@
# ArgoCD Application for Bitwarden, synced from raw manifests in the repo.
# selfHeal is deliberately false here (unlike the other apps): manual changes
# in-cluster are not reverted automatically.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: bitwarden
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/bitwarden
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: bitwarden
syncPolicy:
automated:
prune: true
selfHeal: false
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
# ArgoCD Application for cert-manager — same template as the other
# in-cluster apps in this directory: auto-sync with prune + selfHeal,
# creating the target namespace on first sync.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/cert-manager
destination:
namespace: cert-manager
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: crossplane
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/crossplane
# destination:
# namespace: crossplane
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: durpapi
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpapi
destination:
namespace: durpapi
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: durpot
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/durpot
destination:
namespace: durpot
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-dns
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-dns
destination:
namespace: external-dns
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/external-secrets
destination:
namespace: external-secrets
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: gatekeeper
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/gatekeeper
# destination:
# namespace: gatekeeper
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true

View file

@ -1,21 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: gitlab-runner
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/gitlab-runner
# destination:
# namespace: gitlab-runner
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: heimdall
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/heimdall
destination:
namespace: heimdall
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: krakend
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/krakend
destination:
namespace: krakend
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,22 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: kube-prometheus-stack
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/kube-prometheus-stack
# destination:
# namespace: kube-prometheus-stack
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#
#

View file

@ -1,21 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: kubeclarity
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/kubeclarity
# destination:
# namespace: kubeclarity
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#

View file

@ -1,22 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: littlelink
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/littlelink
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: littlelink
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,21 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: longhorn-system
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/longhorn
destination:
namespace: longhorn-system
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,22 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metallb-system
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/metallb-system
destination:
namespace: metallb-system
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,23 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nfs-client
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/nfs-client
directory:
recurse: true
destination:
namespace: nfs-client
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,21 +0,0 @@
#apiVersion: argoproj.io/v1alpha1
#kind: Application
#metadata:
# name: open-webui
# namespace: argocd
#spec:
# project: default
# source:
# repoURL: https://gitlab.com/developerdurp/homelab.git
# targetRevision: main
# path: master/open-webui
# destination:
# namespace: open-webui
# name: in-cluster
# syncPolicy:
# automated:
# prune: true
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
#

View file

@ -1,17 +0,0 @@
# Pulls ArgoCD's OIDC client secret out of Vault (via the `vault`
# ClusterSecretStore) into the `client-secret` Secret, which the argo-cd
# dex config references as $client-secret:clientSecret.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vault-argocd
labels:
app.kubernetes.io/part-of: argocd
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: client-secret
data:
- secretKey: clientSecret
remoteRef:
key: secrets/argocd/authentik
property: clientsecret

View file

@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: traefik
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/traefik
destination:
namespace: traefik
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,23 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: uptimekuma
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/uptimekuma
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: uptimekuma
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -1,25 +0,0 @@
# ArgoCD Application for HashiCorp Vault.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vault
namespace: argocd
spec:
project: default
source:
repoURL: https://gitlab.com/developerdurp/homelab.git
targetRevision: main
path: master/vault
destination:
namespace: vault
name: in-cluster
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
# The injector webhook's caBundle is patched in-cluster at runtime; ignore
# it so the app does not show as permanently OutOfSync.
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle

View file

@ -1,62 +0,0 @@
# Helm values for the wrapped argo-cd chart (see Chart.yaml dependency).
# Images are pulled through the private registry mirror registry.durp.info.
# NOTE(review): `server:` appears twice in this file (once near the top with
# commented-out OIDC args, once at the bottom with route settings). If both
# are at the same nesting level the later key silently wins in most YAML
# parsers — confirm the intended structure (indentation was lost in this view).
argo-cd:
global:
revisionHistoryLimit: 1
image:
repository: registry.durp.info/argoproj/argocd
imagePullPolicy: Always
server:
#extraArgs:
# - --dex-server-plaintext
# - --dex-server=argocd-dex-server:5556
# oidc.config: |
# name: AzureAD
# issuer: https://login.microsoftonline.com/TENANT_ID/v2.0
# clientID: CLIENT_ID
# clientSecret: $oidc.azuread.clientSecret
# requestedIDTokenClaims:
# groups:
# essential: true
# requestedScopes:
# - openid
# - profile
# - email
dex:
enabled: true
image:
repository: registry.durp.info/dexidp/dex
imagePullPolicy: Always
configs:
cm:
create: true
annotations: {}
url: https://argocd.internal.durp.info
oidc.tls.insecure.skip.verify: "true"
# SSO via authentik's OIDC provider; the client secret is resolved from the
# `client-secret` Kubernetes Secret (populated by the vault-argocd
# ExternalSecret).
dex.config: |
connectors:
- config:
issuer: https://authentik.durp.info/application/o/argocd/
clientID: dbb8ffc06104fb6e7fac3e4ae7fafb1d90437625
clientSecret: $client-secret:clientSecret
insecureEnableGroups: true
scopes:
- openid
- profile
- email
- groups
name: authentik
type: oidc
id: authentik
# Members of the authentik group "ArgoCD Admins" get the built-in admin role.
rbac:
create: true
policy.csv: |
g, ArgoCD Admins, role:admin
scopes: "[groups]"
server:
route:
enabled: false

View file

@ -1,12 +0,0 @@
apiVersion: v2
name: authentik
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: authentik-remote-cluster
repository: https://charts.goauthentik.io
version: 2.1.0

View file

@ -1,24 +0,0 @@
#apiVersion: v1
#kind: PersistentVolume
#metadata:
# annotations:
# pv.kubernetes.io/provisioned-by: durp.info/nfs
# finalizers:
# - kubernetes.io/pv-protection
# name: authentik-pv
#spec:
# accessModes:
# - ReadWriteMany
# capacity:
# storage: 10Gi
# claimRef:
# apiVersion: v1
# kind: PersistentVolumeClaim
# name: authentik-pvc
# namespace: authentik
# nfs:
# path: /mnt/user/k3s/authentik
# server: 192.168.20.253
# persistentVolumeReclaimPolicy: Retain
# storageClassName: nfs-storage
# volumeMode: Filesystem

View file

@ -1,18 +0,0 @@
#apiVersion: v1
#kind: PersistentVolumeClaim
#metadata:
# labels:
# app.kubernetes.io/component: app
# app.kubernetes.io/instance: authentik
# app.kubernetes.io/managed-by: Helm
# app.kubernetes.io/name: authentik
# helm.sh/chart: authentik-2.14.4
# name: authentik-pvc
# namespace: authentik
#spec:
# accessModes:
# - ReadWriteMany
# resources:
# requests:
# storage: 10Gi
# storageClassName: nfs-storage

View file

@ -1,60 +0,0 @@
# Legacy ingress for authentik.durp.info, kept commented out for reference.
#apiVersion: traefik.containo.us/v1alpha1
#kind: IngressRoute
#metadata:
#  name: authentik-ingress
#spec:
#  entryPoints:
#    - websecure
#  routes:
#    - match: Host(`authentik.durp.info`) && PathPrefix(`/`)
#      kind: Rule
#      services:
#        - name: authentik-server
#          port: 80
#  tls:
#    secretName: authentik-tls
#
---
# Let's Encrypt certificate for the embedded-outpost hostname; backs the
# IngressRoute's TLS secret below.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: authentik-tls
spec:
  secretName: authentik-tls
  issuerRef:
    name: letsencrypt-production
    kind: ClusterIssuer
  commonName: "outpost.internal.durp.info"
  dnsNames:
    - "outpost.internal.durp.info"
#---
#
#kind: Service
#apiVersion: v1
#metadata:
#  name: authentik-external-dns
#  annotations:
#    external-dns.alpha.kubernetes.io/hostname: authentik.durp.info
#spec:
#  type: ExternalName
#  externalName: durp.info
---
# Route to authentik's embedded outpost.
# apiVersion updated from the deprecated traefik.containo.us/v1alpha1 (removed
# in Traefik v3) to traefik.io/v1alpha1, matching the other IngressRoutes in
# this repo.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: outpost-ingress
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`outpost.internal.durp.info`) && PathPrefix(`/`)
      kind: Rule
      services:
        - name: ak-outpost-master-embedded-outpost
          port: 9000
  tls:
    secretName: authentik-tls
View file

@ -1,28 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: db-pass
data:
- secretKey: dbpass
remoteRef:
key: secrets/authentik/database
property: dbpass
- secretKey: secretkey
remoteRef:
key: secrets/authentik/database
property: secretkey
- secretKey: postgresql-postgres-password
remoteRef:
key: secrets/authentik/database
property: dbpass
- secretKey: postgresql-password
remoteRef:
key: secrets/authentik/database
property: dbpass

View file

@ -1,30 +0,0 @@
authentik-remote-cluster:
# -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible
nameOverride: ""
# -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible
fullnameOverride: ""
# -- Override the Kubernetes version, which is used to evaluate certain manifests
kubeVersionOverride: ""
## Globally shared configuration for authentik components.
global:
# -- Provide a name in place of `authentik`
nameOverride: ""
# -- String to fully override `"authentik.fullname"`
fullnameOverride: ""
# -- A custom namespace to override the default namespace for the deployed resources.
namespaceOverride: ""
# -- Common labels for all resources.
additionalLabels: {}
# app: authentik
# -- Annotations to apply to all resources
annotations: {}
serviceAccountSecret:
# -- Create a secret with the service account credentials
enabled: true
clusterRole:
# -- Create a clusterole in addition to a namespaced role.
enabled: true

View file

@ -1,7 +0,0 @@
apiVersion: v2
name: bitwarden
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"

View file

@ -1,25 +0,0 @@
#apiVersion: v1
#kind: PersistentVolume
#metadata:
# annotations:
# pv.kubernetes.io/provisioned-by: durp.info/nfs
# finalizers:
# - kubernetes.io/pv-protection
# name: bitwarden-pv
#spec:
# accessModes:
# - ReadWriteMany
# capacity:
# storage: 10Gi
# claimRef:
# apiVersion: v1
# kind: PersistentVolumeClaim
# name: bitwarden-pvc
# namespace: bitwarden
# nfs:
# path: /mnt/user/k3s/bitwarden
# server: 192.168.20.253
# persistentVolumeReclaimPolicy: Retain
# storageClassName: nfs-storage
# volumeMode: Filesystem
#

View file

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bitwarden-pvc
spec:
storageClassName: longhorn
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi

View file

@ -1,50 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: bitwarden
name: bitwarden
labels:
app: bitwarden
spec:
selector:
matchLabels:
app: bitwarden
replicas: 1
template:
metadata:
labels:
app: bitwarden
spec:
containers:
- name: bitwarden
image: registry.durp.info/vaultwarden/server:1.32.7
imagePullPolicy: Always
volumeMounts:
- name: bitwarden-pvc
mountPath: /data
subPath: bitwarden-data
ports:
- name: http
containerPort: 80
env:
- name: SIGNUPS_ALLOWED
value: "FALSE"
- name: INVITATIONS_ALLOWED
value: "FALSE"
- name: WEBSOCKET_ENABLED
value: "TRUE"
- name: ROCKET_ENV
value: "staging"
- name: ROCKET_PORT
value: "80"
- name: ROCKET_WORKERS
value: "10"
- name: SECRET_USERNAME
valueFrom:
secretKeyRef:
name: bitwarden-secret
key: ADMIN_TOKEN
volumes:
- name: bitwarden-pvc
persistentVolumeClaim:
claimName: bitwarden-pvc

View file

@ -1,42 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: bitwarden-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`bitwarden.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: bitwarden
port: 80
tls:
secretName: bitwarden-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: bitwarden-tls
spec:
secretName: bitwarden-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "bitwarden.durp.info"
dnsNames:
- "bitwarden.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: bitwarden-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: bitwarden.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,16 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: bitwarden-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: bitwarden-secret
data:
- secretKey: ADMIN_TOKEN
remoteRef:
key: secrets/bitwarden/admin
property: ADMIN_TOKEN

View file

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: bitwarden
spec:
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
selector:
app: bitwarden

View file

@ -1,11 +0,0 @@
apiVersion: v2
name: cert-manager
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.17.2

View file

@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-production
solvers:
- dns01:
cloudflare:
email: developerdurp@durp.info
apiTokenSecretRef:
name: cloudflare-api-token-secret
key: cloudflare-api-token-secret

View file

@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- dns01:
cloudflare:
email: developerdurp@durp.info
apiTokenSecretRef:
name: cloudflare-api-token-secret
key: cloudflare-api-token-secret

View file

@ -1,16 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: cloudflare-api-token-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: cloudflare-api-token-secret
data:
- secretKey: cloudflare-api-token-secret
remoteRef:
key: secrets/cert-manager
property: cloudflare-api-token-secret

View file

@ -1,25 +0,0 @@
cert-manager:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-controller
pullPolicy: Always
installCRDs: true
replicaCount: 3
extraArgs:
- --dns01-recursive-nameservers=1.1.1.1:53,1.0.0.1:53
- --dns01-recursive-nameservers-only
podDnsPolicy: None
podDnsConfig:
nameservers:
- "1.1.1.1"
- "1.0.0.1"
webhook:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-webhook
pullPolicy: Always
cainjector:
image:
registry: registry.internal.durp.info
repository: jetstack/cert-manager-cainjector
pullPolicy: Always

View file

@ -1,12 +0,0 @@
apiVersion: v2
name: crossplane
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.16.0"
dependencies:
- name: crossplane
repository: https://charts.crossplane.io/stable
version: 1.20.0

View file

@ -1,55 +0,0 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-gitlab
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-gitlab:v0.5.0
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gitlab-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: gitlab-secret
data:
- secretKey: accesstoken
remoteRef:
key: secrets/gitlab/token
property: accesstoken
---
#apiVersion: gitlab.crossplane.io/v1beta1
#kind: ProviderConfig
#metadata:
# name: gitlab-provider
#spec:
# baseURL: https://gitlab.com/
# credentials:
# source: Secret
# secretRef:
# namespace: crossplane
# name: gitlab-secret
# key: accesstoken
#
#---
#
#apiVersion: projects.gitlab.crossplane.io/v1alpha1
#kind: Project
#metadata:
# name: example-project
#spec:
# deletionPolicy: Orphan
# forProvider:
# name: "Example Project"
# description: "example project description"
# providerConfigRef:
# name: gitlab-provider
# policy:
# resolution: Optional
# resolve: Always

File diff suppressed because it is too large Load diff

View file

@ -1,13 +0,0 @@
apiVersion: v2
name: durpapi
description: A Helm chart for Kubernetes
type: application
version: 0.1.0-dev0184
appVersion: 0.1.0
dependencies:
- condition: postgresql.enabled
version: 16.7.*
repository: https://charts.bitnami.com/bitnami
name: postgresql

View file

@ -1,38 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Chart.Name }}
labels:
app: {{ .Chart.Name }}
spec:
revisionHistoryLimit: 1
selector:
matchLabels:
app: {{ .Chart.Name }}
replicas: {{ .Values.deployment.hpa.minReplicas }}
template:
metadata:
labels:
app: {{ .Chart.Name }}
spec:
containers:
- name: api
image: "{{ .Values.deployment.image }}:{{ default .Chart.Version .Values.deployment.tag }}"
imagePullPolicy: {{ .Values.deployment.imagePullPolicy }}
readinessProbe:
{{- toYaml .Values.deployment.probe.readiness | nindent 12 }}
livenessProbe:
{{- toYaml .Values.deployment.probe.liveness | nindent 12 }}
startupProbe:
{{- toYaml .Values.deployment.probe.startup | nindent 12 }}
ports:
- name: http
containerPort: {{ .Values.service.targetport }}
env:
- name: host
value: {{ .Values.swagger.host }}
- name: version
value: {{ default .Chart.Version .Values.deployment.tag }}
envFrom:
- secretRef:
name: {{ .Values.deployment.secretfile }}

View file

@ -1,24 +0,0 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: "{{ .Chart.Name }}-hpa"
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Chart.Name }}
minReplicas: {{ .Values.deployment.hpa.minReplicas }}
maxReplicas: {{ .Values.deployment.hpa.maxReplicas }}
metrics:
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 40

View file

@ -1,44 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: "{{ .Chart.Name }}-ingress"
spec:
entryPoints:
- websecure
routes:
- match: Host("api.durp.info") && PathPrefix(`/api`)
kind: Rule
middlewares:
- name: jwt
services:
- name: "durpapi-service"
port: 80
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: "{{ .Chart.Name }}-swagger"
spec:
entryPoints:
- websecure
routes:
- match: Host("api.durp.info") && PathPrefix(`/swagger`)
kind: Rule
services:
- name: "durpapi-service"
port: 80
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: jwt
spec:
plugin:
jwt:
Required: true
Keys:
- https://authentik.durp.info/application/o/api/jwks

View file

@ -1,39 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: durpapi-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: durpapi-secret
data:
- secretKey: db_host
remoteRef:
key: secrets/durpapi/postgres
property: db_host
- secretKey: db_port
remoteRef:
key: secrets/durpapi/postgres
property: db_port
- secretKey: db_pass
remoteRef:
key: secrets/durpapi/postgres
property: db_pass
- secretKey: db_user
remoteRef:
key: secrets/durpapi/postgres
property: db_user
- secretKey: db_sslmode
remoteRef:
key: secrets/durpapi/postgres
property: db_sslmode
- secretKey: db_name
remoteRef:
key: secrets/durpapi/postgres
property: db_name
- secretKey: llamaurl
remoteRef:
key: secrets/durpapi/llamaurl
property: llamaurl

View file

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "{{ .Chart.Name }}-service"
spec:
ports:
- name: http
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetport }}
protocol: TCP
selector:
app: {{ .Chart.Name }}

View file

@ -1,39 +0,0 @@
ingress:
enabled: false
deployment:
image: registry.durp.info/developerdurp/durpapi
secretfile: durpapi-secret
imagePullPolicy: Always
hpa:
minReplicas: 3
maxReplicas: 10
probe:
readiness:
httpGet:
path: /health/gethealth
port: 8080
liveness:
httpGet:
path: /health/gethealth
port: 8080
startup:
httpGet:
path: /health/gethealth
port: 8080
service:
type: ClusterIP
port: 80
targetport: 8080
swagger:
host: api.durp.info
postgresql:
enabled: true
auth:
existingSecret: durpapi-secret
secretKeys:
adminPasswordKey: db_pass
userPasswordKey: db_pass
replicationPasswordKey: db_pass
database: postgres
username: postgres

View file

@ -1,11 +0,0 @@
apiVersion: v2
name: durpapi
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: durpot
repository: https://gitlab.com/api/v4/projects/45025485/packages/helm/stable
version: 0.1.0-dev0038

View file

@ -1,43 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: durpot-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: durpot-secret
data:
- secretKey: OPENAI_API_KEY
remoteRef:
key: secrets/durpot/openai
property: OPENAI_API_KEY
- secretKey: BOTPREFIX
remoteRef:
key: secrets/durpot/discord
property: BOTPREFIX
- secretKey: ChannelID
remoteRef:
key: secrets/durpot/discord
property: ChannelID
- secretKey: TOKEN
remoteRef:
key: secrets/durpot/discord
property: TOKEN
- secretKey: ClientID
remoteRef:
key: secrets/durpot/auth
property: ClientID
- secretKey: Password
remoteRef:
key: secrets/durpot/auth
property: Password
- secretKey: TokenURL
remoteRef:
key: secrets/durpot/auth
property: TokenURL
- secretKey: Username
remoteRef:
key: secrets/durpot/auth
property: Username

View file

@ -1,12 +0,0 @@
apiVersion: v2
name: external-dns
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: external-dns
repository: https://charts.bitnami.com/bitnami
version: 8.9.2

View file

@ -1,23 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: external-dns-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: external-dns
data:
- secretKey: cloudflare_api_email
remoteRef:
key: secrets/external-dns/cloudflare
property: cloudflare_api_email
- secretKey: cloudflare_api_key
remoteRef:
key: secrets/external-dns/cloudflare
property: cloudflare_api_key
- secretKey: cloudflare_api_token
remoteRef:
key: secrets/external-dns/cloudflare
property: cloudflare_api_token

View file

@ -1,16 +0,0 @@
external-dns:
global:
imageRegistry: "registry.internal.durp.info"
image:
pullPolicy: Always
sources:
- service
provider: cloudflare
cloudflare:
secretName: "external-dns"
proxied: false
policy: sync

View file

@ -1,12 +0,0 @@
apiVersion: v2
name: external-secrets
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 0.17.0

View file

@ -1,463 +0,0 @@
external-secrets:
replicaCount: 3
# -- Specifies the amount of historic ReplicaSets k8s should keep (see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy)
revisionHistoryLimit: 10
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: Always
# -- The image tag to use. The default is the chart appVersion.
# There are different image flavours available, like distroless and ubi.
# Please see GitHub release notes for image tags for these flavors.
# By default the distroless image is used.
tag: ""
# -- If set, install and upgrade CRDs through helm chart.
installCRDs: true
crds:
# -- If true, create CRDs for Cluster External Secret.
createClusterExternalSecret: true
# -- If true, create CRDs for Cluster Secret Store.
createClusterSecretStore: true
# -- If true, create CRDs for Push Secret.
createPushSecret: true
annotations: {}
conversion:
enabled: true
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- If true, external-secrets will perform leader election between instances to ensure no more
# than one instance of external-secrets operates at a time.
leaderElect: true
# -- If set external secrets will filter matching
# Secret Stores with the appropriate controller values.
controllerClass: ""
# -- If true external secrets will use recommended kubernetes
# annotations as prometheus metric labels.
extendedMetricLabels: false
# -- If set external secrets are only reconciled in the
# provided namespace
scopedNamespace: ""
# -- Must be used with scopedNamespace. If true, create scoped RBAC roles under the scoped namespace
# and implicitly disable cluster stores and cluster external secrets
scopedRBAC: false
# -- if true, the operator will process cluster external secret. Else, it will ignore them.
processClusterExternalSecret: true
# -- if true, the operator will process cluster store. Else, it will ignore them.
processClusterStore: true
# -- Specifies whether an external secret operator deployment be created.
createOperator: true
# -- Specifies the number of concurrent ExternalSecret Reconciles external-secret executes at
# a time.
concurrent: 1
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Automounts the service account token in all containers of the pod
automount: true
# -- Annotations to add to the service account.
annotations: {}
# -- Extra Labels to add to the service account.
extraLabels: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
## -- Extra volumes to pass to pod.
extraVolumes: []
## -- Extra volumes to mount to the container.
extraVolumeMounts: []
## -- Extra containers to add to the pod.
extraContainers: []
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead.
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead.
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- namespace where you want to install ServiceMonitors
namespace: ""
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
# -- Let prometheus add an exported_ prefix to conflicting labels
honorLabels: false
# -- Metric relabel configs to apply to samples before ingestion. [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: replace
# regex: (.*)
# replacement: $1
# sourceLabels:
# - exported_namespace
# targetLabel: namespace
# -- Relabel configs to apply to samples before ingestion. [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
metrics:
service:
# -- Enable if you use another monitoring tool than Prometheus to scrape the metrics
enabled: false
# -- Metrics service port to scrape
port: 8080
# -- Additional service annotations
annotations: {}
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
# -- Run the controller on the host network
hostNetwork: false
webhook:
# -- Specifies whether a webhook deployment be created.
create: true
# -- Specifies the interval at which the certificate is checked for validity
certCheckInterval: "5m"
# -- Specifies the lookaheadInterval for certificate validity
lookaheadInterval: ""
replicaCount: 1
# -- Specifies the amount of historic ReplicaSets k8s should keep (see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy)
revisionHistoryLimit: 10
certDir: /tmp/certs
# -- Specifies whether validating webhooks should be created with failurePolicy: Fail or Ignore
failurePolicy: Fail
# -- Specifies if webhook pod should use hostNetwork or not.
hostNetwork: false
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: IfNotPresent
# -- The image tag to use. The default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- The port the webhook will listen to
port: 10250
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Automounts the service account token in all containers of the pod
automount: true
# -- Annotations to add to the service account.
annotations: {}
# -- Extra Labels to add to the service account.
extraLabels: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
metrics:
service:
# -- Enable if you use another monitoring tool than Prometheus to scrape the metrics
enabled: false
# -- Metrics service port to scrape
port: 8080
# -- Additional service annotations
annotations: {}
readinessProbe:
# -- Address for readiness probe
address: ""
# -- ReadinessProbe port for kubelet
port: 8081
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
## -- Extra volumes to pass to pod.
extraVolumes: []
## -- Extra volumes to mount to the container.
extraVolumeMounts: []
# -- Annotations to add to Secret
secretAnnotations: {}
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
certController:
# -- Specifies whether a certificate controller deployment be created.
create: true
requeueInterval: "5m"
replicaCount: 1
# -- Specifies the amount of historic ReplicaSets k8s should keep (see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy)
revisionHistoryLimit: 10
image:
repository: ghcr.io/external-secrets/external-secrets
pullPolicy: Always
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
rbac:
# -- Specifies whether role and rolebinding resources should be created.
create: true
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
# -- Automounts the service account token in all containers of the pod
automount: true
# -- Annotations to add to the service account.
annotations: {}
# -- Extra Labels to add to the service account.
extraLabels: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template.
name: ""
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
# -- Run the certController on the host network
hostNetwork: false
# -- Pod priority class name.
priorityClassName: ""
# -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
prometheus:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
enabled: false
service:
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead
port: 8080
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
# -- Additional labels
additionalLabels: {}
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
metrics:
service:
# -- Enable if you use another monitoring tool than Prometheus to scrape the metrics
enabled: false
# -- Metrics service port to scrape
port: 8080
# -- Additional service annotations
annotations: {}
## -- Extra environment variables to add to container.
extraEnv: []
## -- Map of extra arguments to pass to container.
extraArgs: {}
## -- Extra volumes to pass to pod.
extraVolumes: []
## -- Extra volumes to mount to the container.
extraVolumeMounts: []
# -- Annotations to add to Deployment
deploymentAnnotations: {}
# -- Annotations to add to Pod
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# -- Specifies `dnsOptions` to deployment
dnsConfig: {}

View file

@ -1,11 +0,0 @@
apiVersion: v2
name: gatekeeper
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: gatekeeper
repository: https://open-policy-agent.github.io/gatekeeper/charts
version: 3.19.2

View file

@ -1,278 +0,0 @@
#gatekeeper:
# replicas: 3
# revisionHistoryLimit: 10
# auditInterval: 60
# metricsBackends: ["prometheus"]
# auditMatchKindOnly: false
# constraintViolationsLimit: 20
# auditFromCache: false
# disableMutation: false
# disableValidatingWebhook: false
# validatingWebhookName: gatekeeper-validating-webhook-configuration
# validatingWebhookTimeoutSeconds: 3
# validatingWebhookFailurePolicy: Ignore
# validatingWebhookAnnotations: {}
# validatingWebhookExemptNamespacesLabels: {}
# validatingWebhookObjectSelector: {}
# validatingWebhookCheckIgnoreFailurePolicy: Fail
# validatingWebhookCustomRules: {}
# validatingWebhookURL: null
# enableDeleteOperations: false
# enableExternalData: true
# enableGeneratorResourceExpansion: true
# enableTLSHealthcheck: false
# maxServingThreads: -1
# mutatingWebhookName: gatekeeper-mutating-webhook-configuration
# mutatingWebhookFailurePolicy: Ignore
# mutatingWebhookReinvocationPolicy: Never
# mutatingWebhookAnnotations: {}
# mutatingWebhookExemptNamespacesLabels: {}
# mutatingWebhookObjectSelector: {}
# mutatingWebhookTimeoutSeconds: 1
# mutatingWebhookCustomRules: {}
# mutatingWebhookURL: null
# mutationAnnotations: false
# auditChunkSize: 500
# logLevel: INFO
# logDenies: false
# logMutations: false
# emitAdmissionEvents: false
# emitAuditEvents: false
# admissionEventsInvolvedNamespace: false
# auditEventsInvolvedNamespace: false
# resourceQuota: true
# externaldataProviderResponseCacheTTL: 3m
# image:
# repository: openpolicyagent/gatekeeper
# crdRepository: openpolicyagent/gatekeeper-crds
# release: v3.15.0-beta.0
# pullPolicy: Always
# pullSecrets: []
# preInstall:
# crdRepository:
# image:
# repository: null
# tag: v3.15.0-beta.0
# postUpgrade:
# labelNamespace:
# enabled: false
# image:
# repository: openpolicyagent/gatekeeper-crds
# tag: v3.15.0-beta.0
# pullPolicy: IfNotPresent
# pullSecrets: []
# extraNamespaces: []
# podSecurity: ["pod-security.kubernetes.io/audit=restricted",
# "pod-security.kubernetes.io/audit-version=latest",
# "pod-security.kubernetes.io/warn=restricted",
# "pod-security.kubernetes.io/warn-version=latest",
# "pod-security.kubernetes.io/enforce=restricted",
# "pod-security.kubernetes.io/enforce-version=v1.24"]
# extraAnnotations: {}
# priorityClassName: ""
# affinity: {}
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# resources: {}
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 999
# runAsNonRoot: true
# runAsUser: 1000
# postInstall:
# labelNamespace:
# enabled: true
# extraRules: []
# image:
# repository: openpolicyagent/gatekeeper-crds
# tag: v3.15.0-beta.0
# pullPolicy: IfNotPresent
# pullSecrets: []
# extraNamespaces: []
# podSecurity: ["pod-security.kubernetes.io/audit=restricted",
# "pod-security.kubernetes.io/audit-version=latest",
# "pod-security.kubernetes.io/warn=restricted",
# "pod-security.kubernetes.io/warn-version=latest",
# "pod-security.kubernetes.io/enforce=restricted",
# "pod-security.kubernetes.io/enforce-version=v1.24"]
# extraAnnotations: {}
# priorityClassName: ""
# probeWebhook:
# enabled: true
# image:
# repository: curlimages/curl
# tag: 7.83.1
# pullPolicy: IfNotPresent
# pullSecrets: []
# waitTimeout: 60
# httpTimeout: 2
# insecureHTTPS: false
# priorityClassName: ""
# affinity: {}
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 999
# runAsNonRoot: true
# runAsUser: 1000
# preUninstall:
# deleteWebhookConfigurations:
# extraRules: []
# enabled: false
# image:
# repository: openpolicyagent/gatekeeper-crds
# tag: v3.15.0-beta.0
# pullPolicy: IfNotPresent
# pullSecrets: []
# priorityClassName: ""
# affinity: {}
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# resources: {}
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 999
# runAsNonRoot: true
# runAsUser: 1000
# podAnnotations: {}
# auditPodAnnotations: {}
# podLabels: {}
# podCountLimit: "100"
# secretAnnotations: {}
# enableRuntimeDefaultSeccompProfile: true
# controllerManager:
# exemptNamespaces: []
# exemptNamespacePrefixes: []
# hostNetwork: false
# dnsPolicy: ClusterFirst
# port: 8443
# metricsPort: 8888
# healthPort: 9090
# readinessTimeout: 1
# livenessTimeout: 1
# priorityClassName: system-cluster-critical
# disableCertRotation: false
# tlsMinVersion: 1.3
# clientCertName: ""
# strategyType: RollingUpdate
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: gatekeeper.sh/operation
# operator: In
# values:
# - webhook
# topologyKey: kubernetes.io/hostname
# weight: 100
# topologySpreadConstraints: []
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# resources:
# limits:
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 512Mi
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 999
# runAsNonRoot: true
# runAsUser: 1000
# podSecurityContext:
# fsGroup: 999
# supplementalGroups:
# - 999
# extraRules: []
# networkPolicy:
# enabled: false
# ingress: { }
# # - from:
# # - ipBlock:
# # cidr: 0.0.0.0/0
# audit:
# enablePubsub: false
# connection: audit-connection
# channel: audit-channel
# hostNetwork: false
# dnsPolicy: ClusterFirst
# metricsPort: 8888
# healthPort: 9090
# readinessTimeout: 1
# livenessTimeout: 1
# priorityClassName: system-cluster-critical
# disableCertRotation: false
# affinity: {}
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# resources:
# limits:
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 512Mi
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 999
# runAsNonRoot: true
# runAsUser: 1000
# podSecurityContext:
# fsGroup: 999
# supplementalGroups:
# - 999
# writeToRAMDisk: false
# extraRules: []
# crds:
# affinity: {}
# tolerations: []
# nodeSelector: {kubernetes.io/os: linux}
# resources: {}
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsGroup: 65532
# runAsNonRoot: true
# runAsUser: 65532
# pdb:
# controllerManager:
# minAvailable: 1
# service: {}
# disabledBuiltins: ["{http.send}"]
# psp:
# enabled: true
# upgradeCRDs:
# enabled: true
# extraRules: []
# priorityClassName: ""
# rbac:
# create: true
# externalCertInjection:
# enabled: false
# secretName: gatekeeper-webhook-server-cert
#

View file

@ -1,11 +0,0 @@
apiVersion: v2
name: gitlab-runner
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: gitlab-runner
repository: https://charts.gitlab.io/
version: 0.77.2

View file

@ -1,19 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gitlab-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: gitlab-secret
data:
- secretKey: runner-registration-token
remoteRef:
key: secrets/gitlab/runner
property: runner-registration-token
- secretKey: runner-token
remoteRef:
key: secrets/gitlab/runner
property: runner-token

View file

@ -1,71 +0,0 @@
gitlab-runner:
image:
registry: registry.internal.durp.info
image: gitlab-org/gitlab-runner
imagePullPolicy: Always
gitlabUrl: https://gitlab.com/
unregisterRunner: false
terminationGracePeriodSeconds: 3600
concurrent: 10
checkInterval: 30
rbac:
create: true
rules: []
clusterWideAccess: false
podSecurityPolicy:
enabled: false
resourceNames:
- gitlab-runner
metrics:
enabled: true
serviceMonitor:
enabled: true
service:
enabled: true
annotations: {}
runners:
config: |
[[runners]]
[runners.kubernetes]
namespace = "{{.Release.Namespace}}"
image = "ubuntu:22.04"
privileged = true
executor: kubernetes
name: "k3s"
runUntagged: true
privileged: true
secret: gitlab-secret
#builds:
#cpuLimit: 200m
#cpuLimitOverwriteMaxAllowed: 400m
#memoryLimit: 256Mi
#memoryLimitOverwriteMaxAllowed: 512Mi
#cpuRequests: 100m
#cpuRequestsOverwriteMaxAllowed: 200m
#memoryRequests: 128Mi
#memoryRequestsOverwriteMaxAllowed: 256Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
runAsNonRoot: true
privileged: false
capabilities:
drop: ["ALL"]
podSecurityContext:
runAsUser: 100
fsGroup: 65533
resources:
limits:
memory: 2Gi
requests:
memory: 128Mi
cpu: 500m

View file

@ -1,11 +0,0 @@
apiVersion: v2
name: heimdall
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: 0.0.1
dependencies:
- name: heimdall
repository: https://djjudas21.github.io/charts/
version: 8.5.4

View file

@ -1,52 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
annotations:
name: heimdall-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`heimdall.durp.info`) && PathPrefix(`/`)
middlewares:
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: heimdall
port: 80
- match: Host(`heimdall.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule
services:
- name: ak-outpost-authentik-embedded-outpost
namespace: authentik
port: 9000
tls:
secretName: heimdall-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: heimdall-tls
spec:
secretName: heimdall-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "heimdall.durp.info"
dnsNames:
- "heimdall.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: heimdall-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: heimdall.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,28 +0,0 @@
heimdall:
image:
registry:
repository: registry.internal.durp.info/linuxserver/heimdall
pullPolicy: Always
env:
TZ: UTC
PUID: "1000"
PGID: "1000"
service:
main:
annotations:
external-dns.alpha.kubernetes.io/hostname: heimdall.durp.info
external-dns.alpha.kubernetes.io/target: home.durp.info
ports:
http:
port: 80
ingress:
main:
enabled: false
persistence:
config:
enabled: true

View file

@ -1,7 +0,0 @@
apiVersion: v2
name: internalproxy
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "0.1.0"

View file

@ -1,46 +0,0 @@
#apiVersion: traefik.io/v1alpha1
#kind: IngressRoute
#metadata:
# name: argocd-ingress
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt-production
#spec:
# entryPoints:
# - websecure
# routes:
# - match: Host(`argocd.internal.durp.info`)
# middlewares:
# - name: whitelist
# namespace: traefik
# kind: Rule
# services:
# - name: argocd-server
# port: 443
# scheme: https
# tls:
# secretName: argocd-tls
#
#---
#
#kind: Service
#apiVersion: v1
#metadata:
# name: argocd-server
#spec:
# type: ExternalName
# externalName: argocd-server.argocd.svc.cluster.local
#
#---
#
#apiVersion: cert-manager.io/v1
#kind: Certificate
#metadata:
# name: argocd-tls
#spec:
# secretName: argocd-tls
# issuerRef:
# name: letsencrypt-production
# kind: ClusterIssuer
# commonName: "argocd.internal.durp.info"
# dnsNames:
# - "argocd.internal.durp.info"

View file

@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: blueiris
spec:
ports:
- name: app
port: 81
protocol: TCP
targetPort: 81
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: blueiris
subsets:
- addresses:
- ip: 192.168.99.2
ports:
- name: app
port: 81
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: blueiris-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`blueiris.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: blueiris
port: 81
scheme: http
tls:
secretName: blueiris-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: blueiris-tls
spec:
secretName: blueiris-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "blueiris.internal.durp.info"
dnsNames:
- "blueiris.internal.durp.info"

View file

@ -1,70 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: duplicati
spec:
ports:
- name: app
port: 8200
protocol: TCP
targetPort: 8200
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: duplicati
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 8200
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: duplicati-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`duplicati.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: duplicati
port: 8200
- match: Host(`duplicati.internal.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule
services:
- name: ak-outpost-master-embedded-outpost
namespace: authentik
port: 9000
tls:
secretName: duplicati-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: duplicati-tls
spec:
secretName: duplicati-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "duplicati.internal.durp.info"
dnsNames:
- "duplicati.internal.durp.info"

View file

@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: gitea
spec:
ports:
- name: app
port: 3000
protocol: TCP
targetPort: 3000
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: gitea
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 3000
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: gitea-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`gitea.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: gitea
port: 3000
scheme: http
tls:
secretName: gitea-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: gitea-tls
spec:
secretName: gitea-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "gitea.durp.info"
dnsNames:
- "gitea.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: gitea-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: gitea.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,60 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: infra
spec:
ports:
- name: app
port: 443
protocol: TCP
targetPort: 443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: infra
subsets:
- addresses:
- ip: 192.168.12.130
ports:
- name: app
port: 443
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: infra-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`*.infra.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: infra
port: 443
scheme: https
tls:
secretName: infra-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: infra-tls
spec:
secretName: infra-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "infra.durp.info"
dnsNames:
- "*.infra.durp.info"

View file

@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin
spec:
ports:
- name: app
port: 8096
protocol: TCP
targetPort: 8096
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: jellyfin
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 8096
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`jellyfin.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: jellyfin
port: 8096
scheme: http
tls:
secretName: jellyfin-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: jellyfin-tls
spec:
secretName: jellyfin-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "jellyfin.durp.info"
dnsNames:
- "jellyfin.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: jellyfin-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: jellyfin.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kasm
spec:
ports:
- name: app
port: 443
protocol: TCP
targetPort: 443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: kasm
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 443
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: kasm-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`kasm.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: kasm
port: 443
scheme: https
tls:
secretName: kasm-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kasm-tls
spec:
secretName: kasm-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "kasm.durp.info"
dnsNames:
- "kasm.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: kasm-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: kasm.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: minio
spec:
ports:
- name: app
port: 9769
protocol: TCP
targetPort: 9769
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: minio
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 9769
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: minio-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`minio.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: minio
port: 9769
scheme: http
tls:
secretName: minio-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: minio-tls
spec:
secretName: minio-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "minio.internal.durp.info"
dnsNames:
- "minio.internal.durp.info"

View file

@ -1,71 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nexus
spec:
ports:
- name: app
port: 8081
protocol: TCP
targetPort: 8081
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: nexus
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 8081
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nexus-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`nexus.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: nexus
port: 8081
tls:
secretName: nexus-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: nexus-tls
spec:
secretName: nexus-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "nexus.durp.info"
dnsNames:
- "nexus.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: nexus-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: nexus.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: octopus
spec:
ports:
- name: app
port: 443
protocol: TCP
targetPort: 443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: octopus
subsets:
- addresses:
- ip: 192.168.20.105
ports:
- name: app
port: 443
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: octopus-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`octopus.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: octopus
port: 443
scheme: https
tls:
secretName: octopus-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: octopus-tls
spec:
secretName: octopus-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "octopus.internal.durp.info"
dnsNames:
- "octopus.internal.durp.info"

View file

@ -1,102 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ollama-secret
spec:
secretStoreRef:
name: vault
kind: ClusterSecretStore
target:
name: ollama-secret
data:
- secretKey: users
remoteRef:
key: secrets/internalproxy/ollama
property: users
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: ollama-basic-auth
spec:
basicAuth:
headerField: x-api-key
secret: ollama-secret
---
apiVersion: v1
kind: Service
metadata:
name: ollama
spec:
ports:
- name: app
port: 11435
protocol: TCP
targetPort: 11435
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: ollama
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 11435
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: ollama-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`ollama.durp.info`) && PathPrefix(`/`)
middlewares:
- name: ollama-basic-auth
kind: Rule
services:
- name: ollama
port: 11435
tls:
secretName: ollama-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ollama-tls
spec:
secretName: ollama-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "ollama.durp.info"
dnsNames:
- "ollama.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: ollama-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: ollama.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: plex
spec:
ports:
- name: app
port: 32400
protocol: TCP
targetPort: 32400
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: plex
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 32400
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: plex-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`plex.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: plex
port: 32400
scheme: https
tls:
secretName: plex-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: plex-tls
spec:
secretName: plex-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "plex.durp.info"
dnsNames:
- "plex.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: plex-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: plex.durp.info
spec:
type: ExternalName
externalName: durp.info

View file

@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: portainer
spec:
ports:
- name: app
port: 9443
protocol: TCP
targetPort: 9443
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: portainer
subsets:
- addresses:
- ip: 192.168.20.104
ports:
- name: app
port: 9443
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: portainer-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`portainer.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: portainer
port: 9443
scheme: https
tls:
secretName: portainer-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: portainer-tls
spec:
secretName: portainer-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "portainer.internal.durp.info"
dnsNames:
- "portainer.internal.durp.info"

View file

@ -1,63 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: proxmox
spec:
ports:
- name: app
port: 8006
protocol: TCP
targetPort: 8006
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: proxmox
subsets:
- addresses:
- ip: 192.168.21.254
ports:
- name: app
port: 8006
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: proxmox-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`proxmox.internal.durp.info`) && PathPrefix(`/`)
middlewares:
- name: whitelist
namespace: traefik
kind: Rule
services:
- name: proxmox
port: 8006
scheme: https
tls:
secretName: proxmox-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: proxmox-tls
spec:
secretName: proxmox-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "proxmox.internal.durp.info"
dnsNames:
- "proxmox.internal.durp.info"

View file

@ -1,68 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: redlib
spec:
ports:
- name: app
port: 8082
protocol: TCP
targetPort: 8082
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: redlib
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 8082
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: redlib-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`redlib.durp.info`) && PathPrefix(`/`)
middlewares:
- name: authentik-proxy-provider
namespace: traefik
kind: Rule
services:
- name: redlib
port: 8082
- match: Host(`redlib.durp.info`) && PathPrefix(`/outpost.goauthentik.io`)
kind: Rule
services:
- name: ak-outpost-authentik-embedded-outpost
namespace: authentik
port: 9000
tls:
secretName: redlib-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: redlib-tls
spec:
secretName: redlib-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "redlib.durp.info"
dnsNames:
- "redlib.durp.info"

View file

@ -1,59 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: registry-internal
spec:
ports:
- name: app
port: 5000
protocol: TCP
targetPort: 5000
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: registry-internal
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 5000
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: registry-internal-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`registry.internal.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: registry-internal
port: 5000
tls:
secretName: registry-internal-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: registry-internal-tls
spec:
secretName: registry-internal-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "registry.internal.durp.info"
dnsNames:
- "registry.internal.durp.info"

View file

@ -1,71 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: registry
spec:
ports:
- name: app
port: 5000
protocol: TCP
targetPort: 5000
clusterIP: None
type: ClusterIP
---
apiVersion: v1
kind: Endpoints
metadata:
name: registry
subsets:
- addresses:
- ip: 192.168.20.200
ports:
- name: app
port: 5000
protocol: TCP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: registry-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`registry.durp.info`) && PathPrefix(`/`)
kind: Rule
services:
- name: registry
port: 5000
tls:
secretName: registry-tls
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: registry-tls
spec:
secretName: registry-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
commonName: "registry.durp.info"
dnsNames:
- "registry.durp.info"
---
kind: Service
apiVersion: v1
metadata:
name: registry-external-dns
annotations:
external-dns.alpha.kubernetes.io/hostname: registry.durp.info
spec:
type: ExternalName
externalName: durp.info

Some files were not shown because too many files have changed in this diff Show more