Deploying a private Docker image registry: Harbor Registry

=Deploying Harbor with Helm for a highly available image registry=

harbor    US: [ˈhɑrbər] n. port; haven

== Add the official Harbor Helm chart repository ==
<pre>
$ helm repo add harbor  https://helm.goharbor.io
"harbor" has been added to your repositories
$ helm repo list                 # list the configured chart repositories
NAME    URL
harbor  https://helm.goharbor.io
</pre>
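
If the local chart index is stale, refresh it before searching so that helm search sees the latest versions:
<pre>
helm repo update                 # refresh the local chart index
</pre>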

== Download the chart package locally ==
<pre>
$ helm search repo harbor        # search for the chart (your version may differ)
NAME          CHART VERSION APP VERSION DESCRIPTION
harbor/harbor 1.8.2         2.4.2       An open source trusted cloud native registry th...
$ helm pull harbor/harbor    # download the chart package
$ tar zxvf harbor-1.8.2.tgz  # unpack it
</pre>

== Create the namespace ==
<pre>
# delete the old namespace first if re-installing
kubectl delete namespaces harbor

# create the namespace
kubectl create namespace harbor
</pre>
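
A quick check that the namespace exists before installing anything into it:
<pre>
kubectl get namespace harbor     # STATUS should be Active
</pre>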

== Create the NFS external provisioner ==
See the other related wiki pages.

== Create the ServiceAccount and set up RBAC ==
<pre>
cat nfs-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: harbor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-cr
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["services", "endpoints"]
  verbs: ["get"]
- apiGroups: ["extensions"]
  resources: ["podsecuritypolicies"]
  resourceNames: ["nfs-provisioner"]
  verbs: ["use"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: harbor
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-cr
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nfs-role
  namespace: harbor
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get","list","watch","create","update","patch"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
  namespace: harbor
subjects:
- kind: ServiceAccount
  name: nfs-provisioner
  namespace: harbor
roleRef:
  kind: Role
  name: nfs-role
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-proversitioner
  namespace: harbor
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
      - name: nfs-provisioner
        image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
          - name: PROVISIONER_NAME
            value: example.com/nfs
          - name: NFS_SERVER
            value: 192.168.10.175
          - name: NFS_PATH
            value: /data/nfs/harbor
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.10.175
          path: /data/nfs/harbor


kubectl apply -f nfs-provisioner.yaml

kubectl -n harbor get pod
nfs-proversitioner-5b8bf5b8b9-m879p    1/1    Running  0          69m
</pre>
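
If the pod does not reach Running, the provisioner logs usually point at NFS mount or RBAC problems. A quick check, using the deployment name from the manifest above:
<pre>
kubectl -n harbor describe pod -l app=nfs-provisioner   # events show image-pull or mount failures
kubectl -n harbor logs deploy/nfs-proversitioner        # runtime errors from the provisioner
</pre>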

== Create the StorageClass ==
<pre>
cat harbor-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: harbor-storageclass
  namespace: harbor
provisioner: example.com/nfs


kubectl apply -f harbor-storageclass.yaml
kubectl -n harbor get storageclass

NAME                  PROVISIONER      RECLAIMPOLICY  VOLUMEBINDINGMODE  ALLOWVOLUMEEXPANSION  AGE
harbor-storageclass   example.com/nfs  Delete         Immediate          false                 5d20h
</pre>
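
Before deploying Harbor it is worth confirming that dynamic provisioning actually works end to end. A minimal sketch, assuming the NFS provisioner above is healthy; the PVC name test-claim is just an example:
<pre>
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
  namespace: harbor
spec:
  storageClassName: harbor-storageclass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF

kubectl -n harbor get pvc test-claim     # STATUS should become Bound
kubectl -n harbor delete pvc test-claim  # clean up the test claim
</pre>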

== Edit values.yaml ==
<pre>
cd harbor

# Note: expose via nodePort and disable TLS
cat values.yaml
expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: nodePort
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: false
    # The source of the tls certificate. Set as "auto", "secret"
    # or "none" and fill the information in the corresponding section
    # 1) auto: generate the tls certificate automatically
    # 2) secret: read the tls certificate from the specified secret.
    # The tls certificate can be generated manually or by cert manager
    # 3) none: configure no tls certificate for the ingress. If the default
    # tls certificate is configured in the ingress controller, choose this option
    certSource: auto
    auto:
      # The common name used to generate the certificate, it's necessary
      # when the type isn't "ingress"
      commonName: ""
    secret:
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      secretName: ""
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      # Only needed when the "expose.type" is "ingress".
      notarySecretName: ""
  ingress:
    hosts:
      core: core.harbor.domain
      notary: notary.harbor.domain
    # set to the type of ingress controller if it has specific requirements.
    # leave as `default` for most ingress controllers.
    # set to `gce` if using the GCE ingress controller
    # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
    controller: default
    ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
    kubeVersionOverride: ""
    className: ""
    annotations:
      # note different ingress controllers may require a different ssl-redirect annotation
      # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
    notary:
      # notary ingress-specific annotations
      annotations: {}
      # notary ingress-specific labels
      labels: {}
    harbor:
      # harbor ingress-specific annotations
      annotations: {}
      # harbor ingress-specific labels
      labels: {}
  clusterIP:
    # The name of ClusterIP service
    name: harbor
    # Annotations on the ClusterIP service
    annotations: {}
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
      # The service port Notary listens on. Only needed when notary.enabled
      # is set to true
      notaryPort: 4443
  nodePort:
    # The name of NodePort service
    name: harbor
    ports:
      http:
        # The service port Harbor listens on when serving HTTP
        port: 80
        # The node port Harbor listens on when serving HTTP
        nodePort: 30002
      https:
        # The service port Harbor listens on when serving HTTPS
        port: 443
        # The node port Harbor listens on when serving HTTPS
        nodePort: 30003
      # Only needed when notary.enabled is set to true
      notary:
        # The service port Notary listens on
        port: 4443
        # The node port Notary listens on
        nodePort: 30004
  loadBalancer:
    # The name of LoadBalancer service
    name: harbor
    # Set the IP if the LoadBalancer supports assigning IP
    IP: ""
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
      # The service port Notary listens on. Only needed when notary.enabled
      # is set to true
      notaryPort: 4443
    annotations: {}
    sourceRanges: []

# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands showed on portal
# 2) populate the token service URL returned to docker/notary client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind the proxy, set it as the URL of proxy
externalURL: http://192.168.10.171:30002
# The internal TLS used for harbor components secure communicating. In order to enable https
# in each components tls cert files need to provided in advance.
internalTLS:
  # If internal TLS enabled
  enabled: false
  # There are three ways to provide tls
  # 1) "auto" will generate cert automatically
  # 2) "manual" need provide cert file manually in following value
  # 3) "secret" internal certificates from secret
  certSource: "auto"
  # The content of trust ca, only available when `certSource` is "manual"
  trustCa: ""
  # core related cert configuration
  core:
    # secret name for core's tls certs
    secretName: ""
    # Content of core's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of core's TLS key file, only available when `certSource` is "manual"
    key: ""
  # jobservice related cert configuration
  jobservice:
    # secret name for jobservice's tls certs
    secretName: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    key: ""
  # registry related cert configuration
  registry:
    # secret name for registry's tls certs
    secretName: ""
    # Content of registry's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of registry's TLS key file, only available when `certSource` is "manual"
    key: ""
  # portal related cert configuration
  portal:
    # secret name for portal's tls certs
    secretName: ""
    # Content of portal's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of portal's TLS key file, only available when `certSource` is "manual"
    key: ""
  # chartmuseum related cert configuration
  chartmuseum:
    # secret name for chartmuseum's tls certs
    secretName: ""
    # Content of chartmuseum's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of chartmuseum's TLS key file, only available when `certSource` is "manual"
    key: ""
  # trivy related cert configuration
  trivy:
    # secret name for trivy's tls certs
    secretName: ""
    # Content of trivy's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of trivy's TLS key file, only available when `certSource` is "manual"
    key: ""

ipFamily:
  # ipv6Enabled set to true if ipv6 is enabled in cluster, currently it affected the nginx related component
  ipv6:
    enabled: true
  # ipv4Enabled set to true if ipv4 is enabled in cluster, currently it affected the nginx related component
  ipv4:
    enabled: true

# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
  enabled: true
  # Setting it to "keep" to avoid removing PVCs during a helm delete
  # operation. Leaving it empty will delete PVCs after the chart deleted
  # (this does not apply for PVCs that are created for internal database
  # and redis components, i.e. they are never deleted automatically)
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # Use the existing PVC which must be created manually before bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: ""
      # Specify the "storageClass" used to provision the volume. Or the default
      # StorageClass will be used (the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
    chartmuseum:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
    jobservice:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    trivy:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
  # Define which storage backend is used for registry and chartmuseum to store
  # images and charts. Refer to
  # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
  # for the detail.
  imageChartStorage:
    # Specify whether to disable `redirect` for images and chart storage, for
    # backends which not supported it (such as using minio for `s3` storage type), please disable
    # it. To disable redirects, simply set `disableredirect` to `true` instead.
    # Refer to
    # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect
    # for the detail.
    disableredirect: false
    # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
    # The secret must contain keys named "ca.crt" which will be injected into the trust store
    # of registry's and chartmuseum's containers.
    # caBundleSecretName:

    # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift",
    # "oss" and fill the information needed in the corresponding section. The type
    # must be "filesystem" if you want to use persistent volumes for registry
    # and chartmuseum
    type: filesystem
    filesystem:
      rootdirectory: /storage
      #maxthreads: 100
    azure:
      accountname: accountname
      accountkey: base64encodedaccountkey
      container: containername
      #realm: core.windows.net
    gcs:
      bucket: bucketname
      # The base64 encoded json file which contains the key
      encodedkey: base64-encoded-json-key-file
      #rootdirectory: /gcs/object/name/prefix
      #chunksize: "5242880"
    s3:
      region: us-west-1
      bucket: bucketname
      #accesskey: awsaccesskey
      #secretkey: awssecretkey
      #regionendpoint: http://myobjects.local
      #encrypt: false
      #keyid: mykeyid
      #secure: true
      #skipverify: false
      #v4auth: true
      #chunksize: "5242880"
      #rootdirectory: /s3/object/name/prefix
      #storageclass: STANDARD
      #multipartcopychunksize: "33554432"
      #multipartcopymaxconcurrency: 100
      #multipartcopythresholdsize: "33554432"
    swift:
      authurl: https://storage.myprovider.com/v3/auth
      username: username
      password: password
      container: containername
      #region: fr
      #tenant: tenantname
      #tenantid: tenantid
      #domain: domainname
      #domainid: domainid
      #trustid: trustid
      #insecureskipverify: false
      #chunksize: 5M
      #prefix:
      #secretkey: secretkey
      #accesskey: accesskey
      #authversion: 3
      #endpointtype: public
      #tempurlcontainerkey: false
      #tempurlmethods:
    oss:
      accesskeyid: accesskeyid
      accesskeysecret: accesskeysecret
      region: regionname
      bucket: bucketname
      #endpoint: endpoint
      #internal: false
      #encrypt: false
      #secure: true
      #chunksize: 10M
      #rootdirectory: rootdirectory

imagePullPolicy: IfNotPresent

# Use this set to assign a list of default pullSecrets
imagePullSecrets:
#  - name: docker-registry-secret
#  - name: internal-registry-secret

# The update strategy for deployments with persistent volumes(jobservice, registry
# and chartmuseum): "RollingUpdate" or "Recreate"
# Set it as "Recreate" when "RWM" for volumes isn't supported
updateStrategy:
  type: RollingUpdate

# debug, info, warning, error or fatal
logLevel: info

# The initial password of Harbor admin. Change it from portal after launching Harbor
harborAdminPassword: "evan2240881"
#harborAdminPassword: "Harbor12345"

# The name of the secret which contains key named "ca.crt". Setting this enables the
# download link on portal to download the CA certificate when the certificate isn't
# generated automatically
caSecretName: ""

# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"

# The proxy settings for updating trivy vulnerabilities from the Internet and replicating
# artifacts from/to the registries that cannot be reached directly
proxy:
  httpProxy:
  httpsProxy:
  noProxy: 127.0.0.1,localhost,.local,.internal
  components:
    - core
    - jobservice
    - trivy

# Run the migration job via helm hook
enableMigrateHelmHook: false

# The custom ca bundle secret, the secret must contain key named "ca.crt"
# which will be injected into the trust store for chartmuseum, core, jobservice, registry, trivy components
# caBundleSecretName: ""

## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:

# If service exposed via "ingress", the Nginx will not be used
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

core:
  image:
    repository: goharbor/harbor-core
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  ## Startup probe values
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  # Secret is used when core server communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Fill the name of a kubernetes secret if you want to use your own
  # TLS certificate and private key for token encryption/decryption.
  # The secret must contain keys named:
  # "tls.crt" - the certificate
  # "tls.key" - the private key
  # The default key pair will be used if it isn't set
  secretName: ""
  # The XSRF key. Will be generated automatically if it isn't specified
  xsrfKey: ""
  ## The priority class to run the pod as
  priorityClassName:
  # The time duration for async update artifact pull_time and repository
  # pull_count, the unit is second. Will be 10 seconds if it isn't set.
  # eg. artifactPullAsyncFlushDuration: 10
  artifactPullAsyncFlushDuration:

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.5.3
  replicas: 1
  revisionHistoryLimit: 10
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  maxJobWorkers: 10
  # The logger for jobs: "file", "database" or "stdout"
  jobLoggers:
    - file
    # - database
    # - stdout
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  loggerSweeperDuration: 14 #days

  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  # Secret is used when job service communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  ## The priority class to run the pod as
  priorityClassName:

registry:
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.5.3
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.5.3

    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
  replicas: 1
  revisionHistoryLimit: 10
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  # Secret is used to secure the upload state from client
  # and registry storage backend.
  # See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
  relativeurls: false
  credentials:
    username: "harbor_registry_user"
    password: "harbor_registry_password"
    # Login and password in htpasswd string format. Excludes `registry.credentials.username`  and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt.
    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
      # that allows access to CloudFront
      privateKeySecret: "my-secret"
  # enable purge _upload directories
  upload_purging:
    enabled: true
    # remove files in _upload directories which exist for a period of time, default is one week.
    age: 168h
    # the interval of the purge operations
    interval: 24h
    dryrun: false

chartmuseum:
  enabled: true
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # Harbor defaults ChartMuseum to returning relative urls, if you want using absolute url you should enable it by change the following value to 'true'
  absoluteUrl: false
  image:
    repository: goharbor/chartmuseum-photon
    tag: v2.5.3
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  ## limit the number of parallel indexers
  indexLimit: 0

trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 1
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false
  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
  vulnType: "os,library"
  # severity a comma-separated list of severities to be checked
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  # ignoreUnfixed the flag to display only fixed vulnerabilities
  ignoreUnfixed: false
  # insecure the flag to skip verifying registry certificate
  insecure: false
  # gitHubToken the GitHub access token to download Trivy DB
  #
  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  gitHubToken: ""
  # skipUpdate the flag to disable Trivy DB downloads from GitHub
  #
  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  # `/home/scanner/.cache/trivy/db/trivy.db` path.
  skipUpdate: false
  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
  #
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
  # It would work if all the dependencies are in local.
  # This option doesn’t affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
  # The duration to wait for scan completion
  timeout: 5m0s
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

notary:
  enabled: true
  server:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/notary-server-photon
      tag: v2.5.3
    replicas: 1
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## Additional deployment annotations
    podAnnotations: {}
    ## The priority class to run the pod as
    priorityClassName:
  signer:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/notary-signer-photon
      tag: v2.5.3
    replicas: 1
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## Additional deployment annotations
    podAnnotations: {}
    ## The priority class to run the pod as
    priorityClassName:
  # Fill the name of a kubernetes secret if you want to use your own
  # TLS certificate authority, certificate and private key for notary
  # communications.
  # The secret must contain keys named ca.crt, tls.crt and tls.key that
  # contain the CA, certificate and private key.
  # They will be generated if not set.
  secretName: ""

database:
  # if external database is used, set "type" to "external"
  # and fill the connection informations in "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/harbor-db
      tag: v2.5.3
    # The initial superuser password for internal database
    password: "changeit"
    # The size limit for Shared memory, pgSQL use it for shared_buffer
    # More details see:
    # https://github.com/goharbor/harbor/issues/15034
    shmSizeLimit: 512Mi
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    initContainer:
      migrator: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
      permissions: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
  external:
    host: "192.168.0.1"
    port: "5432"
    username: "user"
    password: "password"
    coreDatabase: "registry"
    notaryServerDatabase: "notary_server"
    notarySignerDatabase: "notary_signer"
    # "disable" - No SSL
    # "require" - Always SSL (skip verification)
    # "verify-ca" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA)
    # "verify-full" - Always SSL (verify that the certification presented by the
    # server was signed by a trusted CA and the server host name matches the one
    # in the certificate)
    sslmode: "disable"
  # The maximum number of connections in the idle connection pool per pod (core+exporter).
  # If it <=0, no idle connections are retained.
  maxIdleConns: 100
  # The maximum number of open connections to the database per pod (core+exporter).
  # If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgre of harbor.
  maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}

redis:
  # if external Redis is used, set "type" to "external"
  # and fill the connection informations in "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/redis-photon
      tag: v2.5.3
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
  external:
    # support redis, redis+sentinel
    # addr for redis: <host_redis>:<port_redis>
    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
    addr: "192.168.0.2:6379"
    # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel
    sentinelMasterSet: ""
    # The "coreDatabaseIndex" must be "0" as the library Harbor
    # used doesn't support configuring it
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    chartmuseumDatabaseIndex: "3"
    trivyAdapterIndex: "5"
    password: ""
  ## Additional deployment annotations
  podAnnotations: {}

exporter:
  replicas: 1
  revisionHistoryLimit: 10
# resources:
#  requests:
#    memory: 256Mi
#    cpu: 100m
  podAnnotations: {}
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  image:
    repository: goharbor/harbor-exporter
    tag: v2.5.3
  nodeSelector: {}
  tolerations: []
  affinity: {}
  cacheDuration: 23
  cacheCleanInterval: 14400
  ## The priority class to run the pod as
  priorityClassName:

metrics:
  enabled: true
  core:
    path: /metrics
    port: 8001
  registry:
    path: /metrics
    port: 8001
  jobservice:
    path: /metrics
    port: 8001
  exporter:
    path: /metrics
    port: 8001
  ## Create prometheus serviceMonitor to scrape harbor metrics.
  ## This requires the monitoring.coreos.com/v1 CRD. Please see
  ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md
  ##
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval: ""
    # Metric relabel configs to apply to samples before ingestion.
    metricRelabelings: []
      # - action: keep
      #  regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
      #  sourceLabels: [__name__]
    # Relabel configs to apply to samples before ingestion.
    relabelings: []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #  separator: ;
      #  regex: ^(.*)$
      #  targetLabel: nodename
      #  replacement: $1
      #  action: replace

trace:
  enabled: false
  # trace provider: jaeger or otel
  # jaeger should be 1.26+
  provider: jaeger
  # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
  sample_rate: 1
  # namespace used to differentiate different harbor services
  # namespace:
  # attributes is a key value dict contains user defined attributes used to initialize trace provider
  # attributes:
  #  application: harbor
  jaeger:
    # jaeger supports two modes:
    #  collector mode(uncomment endpoint and uncomment username, password if needed)
    #  agent mode(uncomment agent_host and agent_port)
    endpoint: http://hostname:14268/api/traces
    # username:
    # password:
    # agent_host: hostname
    # export trace data by jaeger.thrift in compact mode
    # agent_port: 6831
  otel:
    endpoint: hostname:4318
    url_path: /v1/traces
    compression: false
    insecure: true
    timeout: 10s
</pre>
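
Editing values.yaml in place is one approach; the same keys can also be overridden on the helm command line, which leaves the chart untouched. A sketch using only keys shown in the file above:
<pre>
helm install harbor . -n harbor \
  --set expose.type=nodePort \
  --set expose.tls.enabled=false \
  --set externalURL=http://192.168.10.171:30002 \
  --set persistence.persistentVolumeClaim.registry.storageClass=harbor-storageclass
</pre>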

== Install Harbor with helm install ==
<pre>
helm install harbor . -n harbor
kubectl -n harbor get pod -o wide
kubectl -n harbor get svc


# Once everything is Running, open http://192.168.10.171:30002 and log in to the Harbor UI

# To uninstall:
helm uninstall harbor -n harbor
</pre>
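
Once the UI is reachable, the registry can be exercised from any Docker host configured to trust it (see the insecure-registries setup in the next section). A minimal push sketch; library is Harbor's default public project:
<pre>
docker login 192.168.10.171:30002 -u admin    # prompts for the admin password
docker pull nginx:latest
docker tag nginx:latest 192.168.10.171:30002/library/nginx:latest
docker push 192.168.10.171:30002/library/nginx:latest
</pre>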

== Harbor usage ==
<pre>
root@myxps:~# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://747qfuir.mirror.aliyuncs.com"],
  "insecure-registries": ["myharbor.com"]
}

systemctl daemon-reload && systemctl restart docker

# On every node that pulls from the private registry (mainly the worker nodes here), edit the containerd config
[root@k8s-node1 ~] vi /etc/containerd/config.toml

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    root_path = ""
    pool_name = ""
    base_image_size = ""
    async_remove = false

# Append these at the end
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.linuxsa.org:30984"]
          endpoint = ["https://harbor.linuxsa.org:30984"]
  [plugins."io.containerd.grpc.v1.cri".registry.configs]
    [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.linuxsa.org:30984".tls]
      insecure_skip_verify = true
    [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.linuxsa.org:30984".auth]
      username = "admin"
      password = "evan12345678"


## Alternatively, for example with my Harbor at 192.168.10.104, add it under the mirrors section:

    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.10.104"]
</pre>
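
containerd only rereads config.toml on restart, so restart it and test a pull from the node. A sketch, assuming the registry host from the snippet above and that crictl is installed; the image path is just an example:
<pre>
systemctl restart containerd
crictl pull harbor.linuxsa.org:30984/library/nginx:latest
</pre>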
[https://blog.csdn.net/networken/article/details/107502461 Installing Harbor and configuring HTTPS]

== References ==
https://goharbor.io/docs/2.5.0/install-config/harbor-ha-helm/

[https://blog.51cto.com/lidabai/5195706 Deploying Harbor with Helm for a highly available image registry]

[https://blog.csdn.net/miss1181248983/article/details/108931182 Deploying Harbor with Helm]

=docker compose install=
<pre>
tar xvf harbor-offline-installer-v2.8.4.tgz -C harbor

./prepare

./install.sh

# Edit the configuration (harbor.yml) below before running ./prepare and ./install.sh

#hostname: reg.mydomain.com
hostname: myharbor.com

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80

# https related config
#https:
#  # https port for harbor, default is 443
#  port: 443
#  # The path of cert and key files for nginx
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path
#
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
#  # set enabled to true means internal tls is enabled
#  enabled: true
#  # put your cert and key files on dir
#  dir: /etc/harbor/tls/internal

# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
# external_url: https://reg.mydomain.com:8433

# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: evan12344
#harbor_admin_password: Harbor12345
</pre>
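
install.sh brings the stack up with docker-compose, so day-to-day operation is plain docker-compose run from the directory that contains docker-compose.yml:
<pre>
cd harbor
docker-compose ps       # list the Harbor containers and their state
docker-compose down     # stop and remove the stack (persistent data is kept)
docker-compose up -d    # start it again in the background
</pre>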

=Harbor advanced topics (done)=

The Helm deployment above uses nodePort; it really should be switched to ingress.

== Start on boot ==
<pre>
cat /data/harborstart
#!/bin/bash
cd /root/harbor/harbor
docker-compose start


cat /etc/rc.local
#!/bin/sh -e
#
# rc.local
/data/harborstart
</pre>
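
An alternative to rc.local is a small systemd unit, which also gives stop/status handling. A sketch, assuming docker-compose is at /usr/local/bin/docker-compose and the install directory used in the script above:
<pre>
cat > /etc/systemd/system/harbor.service <<'EOF'
[Unit]
Description=Harbor registry (docker-compose)
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/root/harbor/harbor
ExecStart=/usr/local/bin/docker-compose up -d
ExecStop=/usr/local/bin/docker-compose down

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable --now harbor.service
</pre>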

== To do ==
HTTPS (although a domain:port form can also be used)

== Harbor see also ==

[https://blog.csdn.net/reblue520/article/details/117835549 Configuring Harbor to serve over HTTPS with a domain name]

[https://www.cnblogs.com/bytefish/p/8452190.html Installing the Harbor 1.4.0 open-source Docker image registry (with Let's Encrypt certificates)]

[https://blog.51cto.com/coderaction/5252290 The most detailed and simplest tutorial: building a highly available Harbor registry]

[https://www.cnblogs.com/wenyang321/p/14150926.html Part 6: deploying the Harbor registry on Kubernetes with Helm]

[https://www.jianshu.com/p/7f16f9ef40ff Deploying Harbor in k8s]

[https://goharbor.io/docs/2.1.0/install-config/configure-https/ goharbor.io: configure HTTPS]

[https://www.cnblogs.com/hahaha111122222/p/11956347.html Configuring HTTPS access for a Harbor registry]

[https://blog.51cto.com/wutengfei/3741676 Deploying Harbor v2.3.2 with docker-compose and HTTPS]

https://blog.51cto.com/u_13043516/2365284

[https://www.cnblogs.com/cwshuo/p/15032762.html Basic Harbor usage]

[https://www.cnblogs.com/yinzhengjie/p/11706627.html Common Docker private registry components]

[https://blog.csdn.net/wnccmyr/article/details/110140302 Introduction to Harbor and its deployment]

[https://blog.51cto.com/lzlutao/2388635 The full Harbor installation and configuration process]

[https://www.cnblogs.com/yinzhengjie/p/12233594.html Deploying a self-hosted Docker registry with Harbor]

=nexus3=

[https://blog.csdn.net/lanwp5302/article/details/86517301 Installing and using nexus3]
  
 
=Getting started=

If docker pull/push fails with "Get https://192.168.88.52:5000/v2/: http: server gave HTTP response to HTTPS client", mark the registry as insecure in the Docker daemon config and restart Docker:

<pre>
@myxps:~# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://747qfuir.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.10.171:30002"]
}

systemctl restart docker
</pre>
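
After the restart, a quick way to confirm the daemon-side fix is to hit the registry's v2 endpoint over plain HTTP (part of the standard Docker Registry API); the admin credentials here are an assumption:
<pre>
curl -u admin:Harbor12345 http://192.168.10.171:30002/v2/_catalog   # returns the repository list as JSON
</pre>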

2023年10月24日 (二) 04:09的最新版本

Helm部署Harbor,实现高可用的镜像仓库

harbor US: [ˈhɑrbər] n.港口;港湾;

添加Harbor 官方Helm Chart仓库

$ helm repo add harbor  https://helm.goharbor.io
"harbor" has been added to your repositories
$ helm repo  list                  # 查看添加的Chart
NAME    URL                     
harbor  https://helm.goharbor.io

下载Chart包到本地

$ helm search repo harbor         # 搜索chart包 版本可能不一样
NAME          CHART VERSION APP VERSION DESCRIPTION                                       
harbor/harbor 1.8.2         2.4.2       An open source trusted cloud native registry th...
$ helm pull harbor/harbor   # 下载Chart包
$ tar zxvf harbor-1.8.2.tgz   # 解压包

创建命名空间

kubectl  delete  namespaces  harbor                                                                                  

#创建命名空间                                                                                                                                                                                                                                kubectl create   namespace harbor   

创建NFS外部供应商

请见其它相关wiki

创建运行的sa账号并做RBAC授权

cat  nfs-provisioner.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: harbor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-provisioner-cr
rules:
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
 - apiGroups: [""]
   resources: ["services", "endpoints"]
   verbs: ["get"]
 - apiGroups: ["extensions"]
   resources: ["podsecuritypolicies"]
   resourceNames: ["nfs-provisioner"]
   verbs: ["use"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: harbor
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-cr
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nfs-role
  namespace: harbor
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get","list","watch","create","update","patch"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
  namespace: harbor
subjects:
 - kind: ServiceAccount
   name: nfs-provisioner
   namespace: harbor
roleRef:
 kind: Role
 name: nfs-role
 apiGroup: rbac.authorization.k8s.io

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-proversitioner
  namespace: harbor
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
      - name: nfs-provisioner
        image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
          - name: PROVISIONER_NAME
            value: example.com/nfs
          - name: NFS_SERVER
            value: 192.168.10.175
          - name: NFS_PATH
            value: /data/nfs/harbor
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.10.175 
          path: /data/nfs/harbor




 kubectl  apply -f  nfs-provisioner.yaml    

kubectl -n harbor get pod 
nfs-proversitioner-5b8bf5b8b9-m879p     1/1     Running   0          69m
                                                                   
    

cat harbor-storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: harbor-storageclass
  namespace: harbor
provisioner: example.com/nfs


 kubectl  apply -f  harbor-storageclass.yaml                                                                   
 kubectl  -n  harbor  get storageclass  

NAME                  PROVISIONER       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
harbor-storageclass   example.com/nfs   Delete          Immediate           false                  5d20h

修改values.yaml配置

     
cd harbor   

#注意  使用nodePort且关闭tls
cat values.yaml
expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: nodePort
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: false
    # The source of the tls certificate. Set as "auto", "secret"
    # or "none" and fill the information in the corresponding section
    # 1) auto: generate the tls certificate automatically
    # 2) secret: read the tls certificate from the specified secret.
    # The tls certificate can be generated manually or by cert manager
    # 3) none: configure no tls certificate for the ingress. If the default
    # tls certificate is configured in the ingress controller, choose this option
    certSource: auto
    auto:
      # The common name used to generate the certificate, it's necessary
      # when the type isn't "ingress"
      commonName: ""
    secret:
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      secretName: ""
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      # Only needed when the "expose.type" is "ingress".
      notarySecretName: ""
  ingress:
    hosts:
      core: core.harbor.domain
      notary: notary.harbor.domain
    # set to the type of ingress controller if it has specific requirements.
    # leave as `default` for most ingress controllers.
    # set to `gce` if using the GCE ingress controller
    # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
    controller: default
    ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
    kubeVersionOverride: ""
    className: ""
    annotations:
      # note different ingress controllers may require a different ssl-redirect annotation
      # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
    notary:
      # notary ingress-specific annotations
      annotations: {}
      # notary ingress-specific labels
      labels: {}
    harbor:
      # harbor ingress-specific annotations
      annotations: {}
      # harbor ingress-specific labels
      labels: {}
  clusterIP:
    # The name of ClusterIP service
    name: harbor
    # Annotations on the ClusterIP service
    annotations: {}
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
      # The service port Notary listens on. Only needed when notary.enabled
      # is set to true
      notaryPort: 4443
  nodePort:
    # The name of NodePort service
    name: harbor
    ports:
      http:
        # The service port Harbor listens on when serving HTTP
        port: 80
        # The node port Harbor listens on when serving HTTP
        nodePort: 30002
      https:
        # The service port Harbor listens on when serving HTTPS
        port: 443
        # The node port Harbor listens on when serving HTTPS
        nodePort: 30003
      # Only needed when notary.enabled is set to true
      notary:
        # The service port Notary listens on
        port: 4443
        # The node port Notary listens on
        nodePort: 30004
  loadBalancer:
    # The name of LoadBalancer service
    name: harbor
    # Set the IP if the LoadBalancer supports assigning IP
    IP: ""
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
      # The service port Notary listens on. Only needed when notary.enabled
      # is set to true
      notaryPort: 4443
    annotations: {}
    sourceRanges: []

# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands showed on portal
# 2) populate the token service URL returned to docker/notary client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind the proxy, set it as the URL of proxy
externalURL: http://192.168.10.171:30002

# The internal TLS used for harbor components secure communicating. In order to enable https
# in each components tls cert files need to provided in advance.
internalTLS:
  # If internal TLS enabled
  enabled: false
  # There are three ways to provide tls
  # 1) "auto" will generate cert automatically
  # 2) "manual" need provide cert file manually in following value
  # 3) "secret" internal certificates from secret
  certSource: "auto"
  # The content of trust ca, only available when `certSource` is "manual"
  trustCa: ""
  # core related cert configuration
  core:
    # secret name for core's tls certs
    secretName: ""
    # Content of core's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of core's TLS key file, only available when `certSource` is "manual"
    key: ""
  # jobservice related cert configuration
  jobservice:
    # secret name for jobservice's tls certs
    secretName: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    crt: ""
    # Content of jobservice's TLS key file, only available when `certSource` is "manual"
    key: ""
  # registry related cert configuration
  registry:
    # secret name for registry's tls certs
    secretName: ""
    # Content of registry's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of registry's TLS key file, only available when `certSource` is "manual"
    key: ""
  # portal related cert configuration
  portal:
    # secret name for portal's tls certs
    secretName: ""
    # Content of portal's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of portal's TLS key file, only available when `certSource` is "manual"
    key: ""
  # chartmuseum related cert configuration
  chartmuseum:
    # secret name for chartmuseum's tls certs
    secretName: ""
    # Content of chartmuseum's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of chartmuseum's TLS key file, only available when `certSource` is "manual"
    key: ""
  # trivy related cert configuration
  trivy:
    # secret name for trivy's tls certs
    secretName: ""
    # Content of trivy's TLS cert file, only available when `certSource` is "manual"
    crt: ""
    # Content of trivy's TLS key file, only available when `certSource` is "manual"
    key: ""

ipFamily:
  # ipv6Enabled set to true if ipv6 is enabled in the cluster; currently it only affects the nginx-related components
  ipv6:
    enabled: true
  # ipv4Enabled set to true if ipv4 is enabled in the cluster; currently it only affects the nginx-related components
  ipv4:
    enabled: true

# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
  enabled: true
  # Setting it to "keep" to avoid removing PVCs during a helm delete
  # operation. Leaving it empty will delete PVCs after the chart deleted
  # (this does not apply for PVCs that are created for internal database
  # and redis components, i.e. they are never deleted automatically)
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # Use the existing PVC which must be created manually before bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: ""
      # Specify the "storageClass" used to provision the volume. Or the default
      # StorageClass will be used (the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
    chartmuseum:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
    jobservice:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 1Gi
      annotations: {}
    trivy:
      existingClaim: ""
      storageClass: "harbor-storageclass"
      subPath: ""
      accessMode: ReadWriteMany
      size: 5Gi
      annotations: {}
  # Define which storage backend is used for registry and chartmuseum to store
  # images and charts. Refer to
  # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
  # for the detail.
  imageChartStorage:
    # Specify whether to disable `redirect` for images and chart storage. For
    # backends that do not support it (such as MinIO used as the `s3` storage
    # type), disable redirects by setting `disableredirect` to `true`.
    # Refer to
    # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect
    # for the detail.
    disableredirect: false
    # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
    # The secret must contain keys named "ca.crt" which will be injected into the trust store
    # of registry's and chartmuseum's containers.
    # caBundleSecretName:

    # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift",
    # "oss" and fill the information needed in the corresponding section. The type
    # must be "filesystem" if you want to use persistent volumes for registry
    # and chartmuseum
    type: filesystem
    filesystem:
      rootdirectory: /storage
      #maxthreads: 100
    azure:
      accountname: accountname
      accountkey: base64encodedaccountkey
      container: containername
      #realm: core.windows.net
    gcs:
      bucket: bucketname
      # The base64 encoded json file which contains the key
      encodedkey: base64-encoded-json-key-file
      #rootdirectory: /gcs/object/name/prefix
      #chunksize: "5242880"
    s3:
      region: us-west-1
      bucket: bucketname
      #accesskey: awsaccesskey
      #secretkey: awssecretkey
      #regionendpoint: http://myobjects.local
      #encrypt: false
      #keyid: mykeyid
      #secure: true
      #skipverify: false
      #v4auth: true
      #chunksize: "5242880"
      #rootdirectory: /s3/object/name/prefix
      #storageclass: STANDARD
      #multipartcopychunksize: "33554432"
      #multipartcopymaxconcurrency: 100
      #multipartcopythresholdsize: "33554432"
    swift:
      authurl: https://storage.myprovider.com/v3/auth
      username: username
      password: password
      container: containername
      #region: fr
      #tenant: tenantname
      #tenantid: tenantid
      #domain: domainname
      #domainid: domainid
      #trustid: trustid
      #insecureskipverify: false
      #chunksize: 5M
      #prefix:
      #secretkey: secretkey
      #accesskey: accesskey
      #authversion: 3
      #endpointtype: public
      #tempurlcontainerkey: false
      #tempurlmethods:
    oss:
      accesskeyid: accesskeyid
      accesskeysecret: accesskeysecret
      region: regionname
      bucket: bucketname
      #endpoint: endpoint
      #internal: false
      #encrypt: false
      #secure: true
      #chunksize: 10M
      #rootdirectory: rootdirectory

imagePullPolicy: IfNotPresent

# Use this to assign a list of default pullSecrets
imagePullSecrets:
#  - name: docker-registry-secret
#  - name: internal-registry-secret

# The update strategy for deployments with persistent volumes (jobservice, registry
# and chartmuseum): "RollingUpdate" or "Recreate"
# Set it to "Recreate" when ReadWriteMany (RWM) volumes aren't supported
updateStrategy:
  type: RollingUpdate

# debug, info, warning, error or fatal
logLevel: info

# The initial password of Harbor admin. Change it from portal after launching Harbor
harborAdminPassword: "evan2240881"
#harborAdminPassword: "Harbor12345"

# The name of the secret which contains key named "ca.crt". Setting this enables the
# download link on portal to download the CA certificate when the certificate isn't
# generated automatically
caSecretName: ""

# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"

# The proxy settings for updating trivy vulnerabilities from the Internet and replicating
# artifacts from/to the registries that cannot be reached directly
proxy:
  httpProxy:
  httpsProxy:
  noProxy: 127.0.0.1,localhost,.local,.internal
  components:
    - core
    - jobservice
    - trivy

# Run the migration job via helm hook
enableMigrateHelmHook: false

# The custom ca bundle secret, the secret must contain key named "ca.crt"
# which will be injected into the trust store for chartmuseum, core, jobservice, registry, trivy components
# caBundleSecretName: ""

## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:

# If service exposed via "ingress", the Nginx will not be used
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

core:
  image:
    repository: goharbor/harbor-core
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  ## Startup probe values
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  # Secret is used when core server communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Fill the name of a kubernetes secret if you want to use your own
  # TLS certificate and private key for token encryption/decryption.
  # The secret must contain keys named:
  # "tls.crt" - the certificate
  # "tls.key" - the private key
  # The default key pair will be used if it isn't set
  secretName: ""
  # The XSRF key. Will be generated automatically if it isn't specified
  xsrfKey: ""
  ## The priority class to run the pod as
  priorityClassName:
  # The time duration for async update artifact pull_time and repository
  # pull_count, the unit is second. Will be 10 seconds if it isn't set.
  # eg. artifactPullAsyncFlushDuration: 10
  artifactPullAsyncFlushDuration:

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.5.3
  replicas: 1
  revisionHistoryLimit: 10
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  maxJobWorkers: 10
  # The logger for jobs: "file", "database" or "stdout"
  jobLoggers:
    - file
    # - database
    # - stdout
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  loggerSweeperDuration: 14 #days

  # resources:
  #   requests:
  #     memory: 256Mi
  #     cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  # Secret is used when job service communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  ## The priority class to run the pod as
  priorityClassName:

registry:
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.5.3
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.5.3

    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
  replicas: 1
  revisionHistoryLimit: 10
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  # Secret is used to secure the upload state from client
  # and registry storage backend.
  # See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
  relativeurls: false
  credentials:
    username: "harbor_registry_user"
    password: "harbor_registry_password"
    # Login and password in htpasswd string format. Excludes `registry.credentials.username`  and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt.
    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
      # that allows access to CloudFront
      privateKeySecret: "my-secret"
  # enable purging of the _upload directories
  upload_purging:
    enabled: true
    # remove files in _upload directories that have existed longer than the given age; default is one week
    age: 168h
    # the interval of the purge operations
    interval: 24h
    dryrun: false

chartmuseum:
  enabled: true
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # Harbor defaults ChartMuseum to returning relative URLs; if you want absolute URLs, change the following value to 'true'
  absoluteUrl: false
  image:
    repository: goharbor/chartmuseum-photon
    tag: v2.5.3
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  ## limit the number of parallel indexers
  indexLimit: 0

trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    tag: v2.5.3
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 1
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false
  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
  vulnType: "os,library"
  # severity a comma-separated list of severities to be checked
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  # ignoreUnfixed the flag to display only fixed vulnerabilities
  ignoreUnfixed: false
  # insecure the flag to skip verifying registry certificate
  insecure: false
  # gitHubToken the GitHub access token to download Trivy DB
  #
  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  gitHubToken: ""
  # skipUpdate the flag to disable Trivy DB downloads from GitHub
  #
  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  # `/home/scanner/.cache/trivy/db/trivy.db` path.
  skipUpdate: false
  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
  #
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. This means fewer vulnerabilities might be detected in offline mode; it works fine
  # when all the dependencies are available locally.
  # This option doesn't affect the DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
  # The duration to wait for scan completion
  timeout: 5m0s
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  ## Additional deployment annotations
  podAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:

notary:
  enabled: true
  server:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/notary-server-photon
      tag: v2.5.3
    replicas: 1
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## Additional deployment annotations
    podAnnotations: {}
    ## The priority class to run the pod as
    priorityClassName:
  signer:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/notary-signer-photon
      tag: v2.5.3
    replicas: 1
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## Additional deployment annotations
    podAnnotations: {}
    ## The priority class to run the pod as
    priorityClassName:
  # Fill the name of a kubernetes secret if you want to use your own
  # TLS certificate authority, certificate and private key for notary
  # communications.
  # The secret must contain keys named ca.crt, tls.crt and tls.key that
  # contain the CA, certificate and private key.
  # They will be generated if not set.
  secretName: ""

database:
  # if an external database is used, set "type" to "external"
  # and fill in the connection information in the "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/harbor-db
      tag: v2.5.3
    # The initial superuser password for internal database
    password: "changeit"
    # The size limit for Shared memory, pgSQL use it for shared_buffer
    # More details see:
    # https://github.com/goharbor/harbor/issues/15034
    shmSizeLimit: 512Mi
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    initContainer:
      migrator: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
      permissions: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
  external:
    host: "192.168.0.1"
    port: "5432"
    username: "user"
    password: "password"
    coreDatabase: "registry"
    notaryServerDatabase: "notary_server"
    notarySignerDatabase: "notary_signer"
    # "disable" - No SSL
    # "require" - Always SSL (skip verification)
    # "verify-ca" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA)
    # "verify-full" - Always SSL (verify that the certification presented by the
    # server was signed by a trusted CA and the server host name matches the one
    # in the certificate)
    sslmode: "disable"
  # The maximum number of connections in the idle connection pool per pod (core+exporter).
  # If it <=0, no idle connections are retained.
  maxIdleConns: 100
  # The maximum number of open connections to the database per pod (core+exporter).
  # If it <= 0, then there is no limit on the number of open connections.
  # Note: the default max number of connections is 1024 for the PostgreSQL used by Harbor.
  maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}

redis:
  # if external Redis is used, set "type" to "external"
  # and fill in the connection information in the "external" section
  type: internal
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/redis-photon
      tag: v2.5.3
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
  external:
    # support redis, redis+sentinel
    # addr for redis: <host_redis>:<port_redis>
    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
    addr: "192.168.0.2:6379"
    # The name of the set of Redis instances to monitor; it must be set to support redis+sentinel
    sentinelMasterSet: ""
    # The "coreDatabaseIndex" must be "0" as the library Harbor
    # used doesn't support configuring it
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    chartmuseumDatabaseIndex: "3"
    trivyAdapterIndex: "5"
    password: ""
  ## Additional deployment annotations
  podAnnotations: {}

exporter:
  replicas: 1
  revisionHistoryLimit: 10
# resources:
#  requests:
#    memory: 256Mi
#    cpu: 100m
  podAnnotations: {}
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  image:
    repository: goharbor/harbor-exporter
    tag: v2.5.3
  nodeSelector: {}
  tolerations: []
  affinity: {}
  cacheDuration: 23
  cacheCleanInterval: 14400
  ## The priority class to run the pod as
  priorityClassName:

metrics:
  enabled: true
  core:
    path: /metrics
    port: 8001
  registry:
    path: /metrics
    port: 8001
  jobservice:
    path: /metrics
    port: 8001
  exporter:
    path: /metrics
    port: 8001
  ## Create prometheus serviceMonitor to scrape harbor metrics.
  ## This requires the monitoring.coreos.com/v1 CRD. Please see
  ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md
  ##
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval: ""
    # Metric relabel configs to apply to samples before ingestion.
    metricRelabelings: []
      # - action: keep
      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
      #   sourceLabels: [__name__]
    # Relabel configs to apply to samples before ingestion.
    relabelings: []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

trace:
  enabled: false
  # trace provider: jaeger or otel
  # jaeger should be 1.26+
  provider: jaeger
  # set sample_rate to 1 to sample 100% of trace data, 0.5 to sample 50%, and so forth
  sample_rate: 1
  # namespace used to differentiate different harbor services
  # namespace:
  # attributes is a key value dict contains user defined attributes used to initialize trace provider
  # attributes:
  #   application: harbor
  jaeger:
    # jaeger supports two modes:
    #   collector mode(uncomment endpoint and uncomment username, password if needed)
    #   agent mode(uncomment agent_host and agent_port)
    endpoint: http://hostname:14268/api/traces
    # username:
    # password:
    # agent_host: hostname
    # export trace data by jaeger.thrift in compact mode
    # agent_port: 6831
  otel:
    endpoint: hostname:4318
    url_path: /v1/traces
    compression: false
    insecure: true
    timeout: 10s


Install Harbor with helm install

helm install harbor . -n harbor
kubectl -n harbor get pod -o wide
kubectl -n harbor get svc

Once every pod is Running, open 192.168.10.171:30002 (the externalURL configured above) and log in to the Harbor UI.

To remove the release again: helm uninstall harbor -n harbor
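If you'd rather not edit values.yaml in place, the key settings can also be overridden at install time. A minimal sketch, assuming the chart directory from above and the same nodePort/StorageClass values:

# Sketch: install with inline overrides instead of editing values.yaml
helm install harbor . -n harbor \
  --set expose.type=nodePort \
  --set expose.tls.enabled=false \
  --set externalURL=http://192.168.10.171:30002 \
  --set persistence.persistentVolumeClaim.registry.storageClass=harbor-storageclass \
  --set harborAdminPassword=Harbor12345

# Watch the rollout until every pod is Running
kubectl -n harbor get pod -w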

harbor usage


root@myxps:~# cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://747qfuir.mirror.aliyuncs.com"],
  "insecure-registries": ["myharbor.com"] 
}

 systemctl  daemon-reload && systemctl restart  docker
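With the insecure registry configured, a quick push/pull smoke test looks like this. A sketch, assuming Harbor's default public project "library" and that myharbor.com resolves to the Harbor host:

docker login myharbor.com                                 # Harbor admin account
docker pull alpine:3.16                                   # any small public image
docker tag alpine:3.16 myharbor.com/library/alpine:3.16
docker push myharbor.com/library/alpine:3.16
docker pull myharbor.com/library/alpine:3.16              # verify the round trip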

# On every node that pulls from the private registry (mainly the worker nodes in my case), edit the containerd config:
[root@k8s-node1 ~] vi /etc/containerd/config.toml

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    root_path = ""
    pool_name = ""
    base_image_size = ""
    async_remove = false

# Append the following at the end
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.linuxsa.org:30984"]
      endpoint = ["https://harbor.linuxsa.org:30984"]
    [plugins."io.containerd.grpc.v1.cri".registry.configs]
      [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.linuxsa.org:30984".tls]
        insecure_skip_verify = true
      [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.linuxsa.org:30984".auth]
        username = "admin"
        password = "evan12345678"


## For an IP-based Harbor (mine is at 192.168.10.104), the mirror entry goes under the registry section like this:

    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.10.104"]
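containerd only re-reads config.toml on restart, so restart it and then verify that the node can pull from Harbor through the CRI, the same path kubelet uses. A sketch; the image name under the default "library" project is an assumption:

systemctl restart containerd

# Pull via CRI with the credentials configured above
crictl pull --creds admin:evan12345678 harbor.linuxsa.org:30984/library/alpine:3.16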


Installing Harbor with HTTPS and using it

Reference

https://goharbor.io/docs/2.5.0/install-config/harbor-ha-helm/

helm部署Harbor,实现高可用的镜像仓库  

helm部署harbor

docker compose install



tar xvf harbor-offline-installer-v2.8.4.tgz -C harbor

./prepare

./install.sh

# Edit harbor.yml as below BEFORE running ./prepare and ./install.sh

#hostname: reg.mydomain.com
hostname: myharbor.com

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80

# https related config
#https:
#  # https port for harbor, default is 443
#  port: 443
#  # The path of cert and key files for nginx
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path
#
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
#   # set enabled to true means internal tls is enabled
#   enabled: true
#   # put your cert and key files on dir
#   dir: /etc/harbor/tls/internal

# Uncomment external_url if you want to enable an external proxy.
# When it is enabled, the hostname above will no longer be used.
# external_url: https://reg.mydomain.com:8433

# The initial password of Harbor admin.
# It only takes effect the first time Harbor is installed.
# Remember to change the admin password from the UI after launching Harbor.
harbor_admin_password: evan12344
#harbor_admin_password: Harbor12345
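After ./install.sh completes, the stack can be sanity-checked from the installer directory. A sketch, assuming the compose files live in /root/harbor/harbor as in the autostart script below:

cd /root/harbor/harbor
docker-compose ps                                # every service should be Up (healthy)
curl -s http://myharbor.com/api/v2.0/health      # Harbor's own health endpoint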



Advanced Harbor topics: done.

The helm deployment above uses nodePort; it should really be switched to ingress.



Auto-start on boot


cat /data/harborstart 
#!/bin/bash
cd  /root/harbor/harbor
docker-compose start


 cat /etc/rc.local 
#!/bin/sh -e
#
# rc.local
/data/harborstart
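On systemd-based distributions rc.local is often disabled, so a small unit file is a more reliable way to autostart Harbor. A sketch; the unit name and the docker-compose path are assumptions, adjust them to your system:

cat > /etc/systemd/system/harbor.service <<'EOF'
[Unit]
Description=Harbor registry (docker-compose)
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/root/harbor/harbor
ExecStart=/usr/local/bin/docker-compose start
ExecStop=/usr/local/bin/docker-compose stop

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload && systemctl enable --now harbor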

To be improved

HTTPS: the registry address can also take the domain:port form.

harbor see also

配置harbor支持域名以https方式对外提供服务

安装Harbor1.4.0开源docker镜像仓库(含letsencrypt证书)

最详细最简单的教程,搭建一个高可用的harbor镜像仓库

第六篇 kubernetes helm部署harbor镜像仓库

k8s 中部署harbor

goharbor. configure-https




Harbor仓库配置https访问

使用docker-compose部署Harbor v2.3.2 and https

https://blog.51cto.com/u_13043516/2365284 harbor的简单使用

Docker Private Registry 常用组件

Harbor 介绍及安装部署

Harbor安装配置全过程

Docker自建仓库之Harbor部署实战

nexus3

nexus3安装与使用

Getting started

docker run -d -p 5000:5000 --restart=always --name registry -v /opt/registry:/var/lib/registry registry:2

# docker ps output (excerpt)
93602b4311e2        k8s.gcr.io/pause:3.1   "/pause"

Use docker commit to turn a container into an image with a given tag:
root@k8s-node1:~# docker commit 93602b4311e2  192.168.88.59:5000/mypause:v1
sha256:0937ae67cb675168c23ede1e15408d19d235112a892f1c095c33404f50c9bf9f

docker push 192.168.88.59:5000/mypause:v1

Using Docker Registry

Running it with docker

How to run it with docker-compose

Info: the registry server is 192.168.88.52

mkdir /data/registry
# Mounting the data directory matters more than supplying a config file
 docker run -d \
  -p 5000:5000 \
  --restart=always \
  --name registry \
  -v /data/registry:/var/lib/registry \
  registry:2
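A quick check that the registry is answering; the catalog stays empty until the first push:

curl http://192.168.88.52:5000/v2/_catalog
# {"repositories":[]}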



docker  images 
REPOSITORY              TAG                 IMAGE ID            CREATED             SIZE
php                     7.1-fpm-alpine      cbfebc795f0b        4 weeks ago         70.1MB

docker tag php:7.1-fpm-alpine  192.168.88.52:5000/php

[root@localhost ~]# docker push 192.168.88.52:5000/php
The push refers to repository [192.168.88.52:5000/php]
Get https://192.168.88.52:5000/v2/: http: server gave HTTP response to HTTPS client

root@myxps:~# cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://747qfuir.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.88.52:5000"] 
}

systemctl restart docker

# The push now succeeds
docker push 192.168.88.52:5000/php

Configure an SSL certificate and an nginx reverse proxy for docker registry

Generate the SSL certificate

Set up a private CA. Initialize the CA environment: under /etc/pki/CA/ create the certificate index database file index.txt and the serial-number file serial, and give the serial file an initial value.
# touch /etc/pki/CA/{index.txt,serial}
# echo 01 > /etc/pki/CA/serial


Generate the CA private key and save it to /etc/pki/CA/private/cakey.pem
# (umask 077;openssl genrsa -out  /etc/pki/CA/private/cakey.pem 2048)

Generate the root certificate
# openssl req -new -x509 -key  /etc/pki/CA/private/cakey.pem -out /etc/pki/CA/cacert.pem -days 3650


Information to fill in:

Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:Beijing  
Locality Name (eg, city) [Default City]:Beijing
Organization Name (eg, company) [Default Company Ltd]:hub
Organizational Unit Name (eg, section) []:ops
Common Name (eg, your name or your server's hostname) []:hub.com
Email Address []:[email protected]


Make the system trust the root certificate
cat /etc/pki/CA/cacert.pem >> /etc/pki/tls/certs/ca-bundle.crt
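Optionally inspect what was just generated; a sketch:

# Show the subject and validity window of the new root certificate
openssl x509 -in /etc/pki/CA/cacert.pem -noout -subject -dates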

Install nginx
yum install nginx    # the openssl module is already included


Issue the server certificate
Create an ssl directory to hold the key file and the certificate signing request

mkdir /app/nginx/conf/ssl

Create the key file and the certificate signing request (CSR)
(umask 077;openssl genrsa -out /app/nginx/conf/ssl/docker.key 2048)
openssl req -new -key /app/nginx/conf/ssl/docker.key -out /app/nginx/conf/ssl/docker.csr


The first four fields of the request must match the private CA's information

Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:Beijing
Locality Name (eg, city) [Default City]:Beijing
Organization Name (eg, company) [Default Company Ltd]:hub
Organizational Unit Name (eg, section) []:ops
Common Name (eg, your name or your server's hostname) []:hub.com
Email Address []:[email protected]

Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []:    # just press Enter
An optional company name []:


Sign the certificate
openssl ca -in /app/nginx/conf/ssl/docker.csr -out /app/nginx/conf/ssl/docker.crt -days 3650

(output truncated)
Certificate is to be certified until Jun 30 02:55:22 2029 GMT (3650 days)
Sign the certificate? [y/n]:y


1 out of 1 certificate requests certified, commit? [y/n]y
Write out database with 1 new entries
Data Base Updated 
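Before wiring the certificate into nginx it is worth confirming that it chains back to the private CA; a sketch:

openssl verify -CAfile /etc/pki/CA/cacert.pem /app/nginx/conf/ssl/docker.crt
# expected: /app/nginx/conf/ssl/docker.crt: OK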

Configure nginx as a reverse proxy for docker registry

Add basic authentication
yum -y install httpd-tools

# The docker-registry.htpasswd path matches the auth_basic_user_file in the nginx config below
htpasswd -c /etc/nginx/conf.d/docker-registry.htpasswd test
New password: 
Re-type new password: 
Adding password for user test


[root@localhost conf.d]# cat docker-registry.conf

upstream docker-registry {
    server 127.0.0.1:5000;
}

server {
    listen 80;
    server_name hub.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen      443;
    server_name hub.com;
    #charset koi8-r;

    #access_log  logs/host.access.log  main;
    ssl                 on;
    ssl_certificate     /app/nginx/conf/ssl/docker.crt;
    ssl_certificate_key /app/nginx/conf/ssl/docker.key;

    chunked_transfer_encoding on;

    proxy_set_header X-Forwarded-Proto "https";
    client_max_body_size  1G;
    proxy_connect_timeout 3000;
    proxy_send_timeout    3000;
    proxy_read_timeout    3000;
    proxy_buffering off;
    tcp_nodelay     on;

    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always;

    location / {
        auth_basic "Docker registry";
        auth_basic_user_file /etc/nginx/conf.d/docker-registry.htpasswd;
        proxy_pass http://docker-registry;
    }
    location /_ping {
        auth_basic off;
        proxy_pass http://docker-registry;
    }
    location /v2/_ping {
        auth_basic off;
        proxy_pass http://docker-registry;
    }
}


systemctl  restart nginx 

registry usage

If there is no DNS to resolve the internal domain name, add it to the hosts file:
cat >>/etc/hosts <<EOF
192.168.88.52 hub.com
EOF

systemctl daemon-reload 
systemctl restart docker

Log in
[root@localhost conf.d]#  docker login hub.com
Username: test
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

Commit a container to an image
[root@localhost conf.d]# docker  ps 
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                    NAMES
6d5f17124090        registry:2          "/entrypoint.sh /etc…"   2 hours ago         Up 20 minutes       0.0.0.0:5000->5000/tcp   registry
[root@localhost conf.d]# docker commit 6d5f17124090 hub.com/registry:v2
sha256:d7af5d03593d7f60903dc9da2b9a4d1d0a1a70878e0a7a09423372261cb4fccb
[root@localhost conf.d]# docker push hub.com/registry:v2
The push refers to repository [hub.com/registry]

Push an image

 docker tag  nginx hub.com/nginx
 docker push  hub.com/nginx

Check the catalog
curl -u test:test https://hub.com/v2/_catalog
{"repositories":["httpd","nginx","php"]}
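The v2 API can also list the tags of a single repository with the same basic-auth user; a sketch (the nginx push above used the default latest tag):

curl -u test:test https://hub.com/v2/nginx/tags/list
# {"name":"nginx","tags":["latest"]}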


client

Authenticating other machines on the LAN (192.168.88.60, ubuntu)

On the other machine:
cat >>/etc/hosts <<EOF
192.168.88.52 hub.com
EOF

Send the CA certificate to the client and append it to ca-bundle.crt

on 60 
mkdir -p /etc/pki/tls/certs/

on 52 
 scp -p /etc/pki/tls/certs/ca-bundle.crt  [email protected]:/etc/pki/tls/certs/ca-bundle.crt
 scp -p /etc/pki/CA/cacert.pem [email protected]:/etc/pki/CA/cacert.pem

#on 60
 cat /etc/pki/CA/cacert.pem >> /etc/pki/tls/certs/ca-bundle.crt
Restart docker
systemctl  restart  docker
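With the CA bundle in place the client should now pass TLS verification; a quick check from the 88.60 machine:

docker login hub.com        # user test, created with htpasswd above
docker pull hub.com/nginx   # pulls over HTTPS with a trusted certificate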

trouble

Problem: docker login returns 404

[root@localhost conf.d]# docker login hub.com
Username: test
Password: 
Error response from daemon: login attempt to https://hub.com/v2/ failed with status: 404 Not Found

The hosts entry was written the wrong way round: the IP must come first, then the domain name.


At first the SSL certificate lines in nginx were commented out, which produced:
Username: test
Password: 
Error response from daemon: Get https://hub.com/v2/: http: server gave HTTP response to HTTPS client

SSL error: certificate signed by unknown authority


#on ubuntu 
root@k8s-node2:~# docker login  hub.com
Username: test
Password: 
Error response from daemon: Get https://hub.com/v2/: x509: certificate signed by unknown authority

 cat /etc/docker/daemon.json
{
  "insecure-registries" : ["hub.com"]
}

systemctl  restart docker


docker push fails: unknown blob / received unexpected HTTP status: 502 Bad Gateway

docker push hub.com/httpd:2.4.16
kept ending with
unknown blob

An nginx configuration problem?

received unexpected HTTP status: 502 Bad Gateway

Fix: add the following to nginx.conf:
proxy_set_header X-Forwarded-Proto "https";
client_max_body_size  1G;
proxy_connect_timeout 3000;
proxy_send_timeout    3000;
proxy_read_timeout    3000;
proxy_buffering off;
tcp_nodelay     on;

proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

see also

搭建私服-docker registry


https://docs.docker.com/registry/configuration/

Docker 私有镜像仓库的搭建及认证

k8s实战之从私有仓库拉取镜像 - kubernetes

docker login x509: certificate signed by unknown authority

docker push 出现:x509: certificate signed by unknown authority

https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry


Docker Hub 仓库使用,及搭建 Docker Registry

docker通过代理上传https协议的私服地址报错unknown blob


解决:Error response from daemon: Get https://index.docker.io/v1/search?q=openjdk&n=25: dial tcp: looku

docker login CA认证问题/添加自签发的 SSL 证书为受信任的根证书

CentOS7.4 Docker Harbor registry基于Https方式安全认证私有仓库搭建


搭建一个支持HTTPS的私有DOCKER Registry

docker私有仓库

Docker搭建本地仓库registry

部署私有Docker Registry

Moving to Docker(二):搭建一个私有registry服务

other

How To Create a Self-Signed SSL Certificate for Nginx on CentOS 7

Nginx启动SSL功能,并进行功能优化

CentOS 7 yum 安装 Nginx

changelog

Wednesday 2019-07-03 16:23:35 CST: added the SSL section