Deploy HyperTest Server

This page shows how to install the HyperTest server on a Linux VM using Docker Compose.

In this guide, we will install HyperTest on a new Ubuntu VM.

The VM should preferably be latest Ubuntu version - 22.04.

If you are using AWS EC2, you can reduce cost by installing HyperTest on a spot instance and attaching an elastic IP to it.

Tech Stack Overview

Docker: Docker is an open platform for developing, shipping, and running applications

HyperTest: A No-code API testing tool

Recommended minimum resources required to run HyperTest on a VM are as follows:

4 vCPU

16 GB RAM

100 GB Disk

Prerequisites

You should have root user access in VM

Your system should have the following installed:

  1. Docker: (>= 20.10.6)

1. Installing Docker

Check if you have docker installed in your VM already by using the following command

docker -v

If you don’t have docker, install it using the following command

curl -fsSL https://get.docker.com -o get-docker.sh && sudo sh get-docker.sh

If you have an older version of Docker (< 20.10.6), remove it and reinstall the latest version using the above command

Check if docker is installed successfully by using the following command

docker -v

Getting Started

1. Deploy HyperTest Services

Switch to sudo user

sudo -i

Create a file named docker-compose.yml with below content

mkdir -p /opt/hypertest
cd /opt/hypertest
nano docker-compose.yml
docker-compose.yml
```dockercompose
# Docker Compose stack for the HyperTest server.
# NOTE: `version` is ignored by Docker Compose v2+; kept for older docker-compose clients.
version: "3.8"

services:
  # PostgreSQL 15 (Zalando Spilo image); registers with Consul for discovery.
  pg_db:
    image: ghcr.io/zalando/spilo-15:3.1-p1
    restart: always
    #    command: ["postgres", "-c", "log_statement=all"]
    environment:
      PGUSER_SUPERUSER: ht
      # NOTE(review): default credentials — change before exposing to production traffic.
      PGPASSWORD_SUPERUSER: pass
      PGVERSION: "15"
      CONSUL_HOST: consul
      CONSUL_PORT: "8500"
    #      SPILO_CONFIGURATION: |
    #        postgresql:
    #          parameters:
    #            log_statement: 'all'
    #            log_duration: 'on'
    #            log_destination: 'stderr'
    #            logging_collector: "false"

    volumes:
      - ht_pg:/home/postgres/pgdata/pgroot/data
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # In-memory cache used by the HyperTest services.
  redis:
    image: redis:alpine
    restart: always
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # NATS message broker (connection URL is published to Consul KV below).
  nats:
    image: nats:latest
    restart: always
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Single-node Consul server; on startup it seeds the KV store with the
  # database and NATS connection URLs the other services read at boot.
  consul:
    image: hashicorp/consul:latest
    restart: always
    volumes:
      - ht_consul_data:/consul/data
      - ht_consul_config:/consul/config
    command: >
      sh -c "consul agent -server -bootstrap-expect=1 -ui -client=0.0.0.0 -dns-port=8600 -http-port=8500 -data-dir=/consul/data &
        echo 'started consul' && sleep 5 &&
        consul kv put -cas COLLECTOR_DB_URL postgresql://ht:pass@pg_db:5432/postgres?schema=ht_collector || echo 'Failed to set COLLECTOR_DB_URL' &&
        consul kv put -cas HYT_DB_URL postgresql://ht:pass@pg_db:5432/postgres?schema=ht_hyt || echo 'Failed to set HYT_DB_URL' &&
        consul kv put -cas NATS_CONN_URL nats://nats:4222 || echo 'Failed to set NATS_CONN_URL' &&
        echo 'done' && tail -f /dev/null"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # One-shot validator; retries on failure until preconditions are satisfied.
  pre-start-validator:
    image: public.ecr.aws/hypertestco/hypertestv2:${HYPERTEST_VERSION}
    restart: on-failure
    command: 'node dist/app/initializer.js exec pre-start-validator'
    environment:
      NODE_ENV: production
      CONSUL_HOST: consul
      CONSUL_HTTP_PORT: "8500"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Main HyperTest backend (dashboard API), reached via the nginx proxy.
  backend:
    image: public.ecr.aws/hypertestco/hypertestv2:${HYPERTEST_VERSION}
    restart: on-failure
    command: 'node dist/app/initializer.js exec start-backend'
    environment:
      NODE_ENV: production
      CONSUL_HOST: consul
      CONSUL_HTTP_PORT: "8500"
      BACKEND_PORT: "6010"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Logger service that receives mirrored traffic, reached via the nginx proxy.
  logger:
    image: public.ecr.aws/hypertestco/hypertestv2:${HYPERTEST_VERSION}
    restart: on-failure
    command: 'node dist/app/initializer.js exec start-logger'
    environment:
      NODE_ENV: production
      CONSUL_HOST: consul
      CONSUL_HTTP_PORT: "8500"
      LOGGER_PORT: "3000"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Consumer for the logger's queue; the sleep gives its dependencies time to start.
  logger-consumer:
    image: public.ecr.aws/hypertestco/hypertestv2:${HYPERTEST_VERSION}
    restart: on-failure
    command: sh -c 'sleep 10 && node dist/app/initializer.js exec start-logger-consumer'
    environment:
      NODE_ENV: production
      CONSUL_HOST: consul
      CONSUL_HTTP_PORT: "8500"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Ingests traces forwarded by the OpenTelemetry collector.
  trace-ingestor:
    image: public.ecr.aws/hypertestco/hypertestv2:${HYPERTEST_VERSION}
    restart: on-failure
    command: node dist/app/initializer.js exec start-trace-ingestor
    environment:
      NODE_ENV: production
      CONSUL_HOST: consul
      CONSUL_HTTP_PORT: "8500"
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Reverse proxy in front of backend, logger, and Consul.
  # Port mappings are quoted to avoid YAML scalar-typing surprises.
  nginx:
    image: nginx:alpine
    volumes:
      - ./ht-proxy.conf:/etc/nginx/conf.d/default.conf
      - .:/home/nginx/app
    ports:
      - "${HOST_BACKEND_PORT}:6010"
      - "${HOST_LOGGER_PORT}:3000"
      - "${HOST_CONSUL_HTTP_PORT}:8500"
    restart: always
    depends_on:
      - backend
      - logger
      - consul
    networks:
      - hypertestv2
    logging:
      options:
        max-size: "4m"
        max-file: "10"

  # Container-management UI, exposed under /portainer via the nginx proxy.
  # `$$` escapes literal dollars in the bcrypt hash for Compose interpolation.
  portainer:
    image: portainer/portainer-ce:latest
    restart: always
    command: >-
      -H unix:///var/run/docker.sock
      --admin-password '$$2y$$05$$xoLoah4t.CUjVXgvfIS7FOs4RgJyEMGtyPMmPRynLEOXMGU5loTuS'
    volumes:
      - .:/home/portainer/app
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - hypertestv2

  # OpenTelemetry Collector; receives OTLP and exports to Jaeger and the logger.
  otel-collector:
    image: otel/opentelemetry-collector-contrib:0.82.0
    restart: always
    command:
      - --config=/etc/otelcol-contrib/otel-collector.yml
    volumes:
      - ./otel-collector.yml:/etc/otelcol-contrib/otel-collector.yml
    ports:
      #      - "1888:1888" # pprof extension
      #      - "8888:8888" # Prometheus metrics exposed by the collector
      #      - "8889:8889" # Prometheus exporter metrics
      - "13133:13133" # health_check extension
      - "${HOST_COLLECTOR_GRPC_PORT}:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP http receiver
      - "55679:55679" # zpages extension
    depends_on:
      - jaeger-all-in-one
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - hypertestv2

  # Jaeger UI + collector; 14268/14250 are exposed on ephemeral host ports.
  jaeger-all-in-one:
    image: jaegertracing/all-in-one:latest
    restart: always
    ports:
      - "16686:16686"
      - "14268"
      - "14250"
    networks:
      - hypertestv2

volumes:
  ht_pg:
  ht_consul_data:
  ht_consul_config:
networks:
  hypertestv2:
    name: hypertestv2

```

Create a file named ht-proxy.conf with below content

nano ht-proxy.conf
ht-proxy.conf
# Upstream for the HyperTest backend (dashboard API) container.
upstream backend-service {
  server backend:6010;
}

# Upstream for the logger container; markers below are placeholders used
# to splice in extra logger instances — do not remove them.
upstream logger-service {
  server logger:3000;
  ## extra-logger-starts
  ## extra-logger-ends
}

# Upstream for the Consul HTTP API/UI container.
upstream consul-service {
    server consul:8500;
}


# Dashboard entry point (published on the host as HOST_BACKEND_PORT).
server {
  listen 6010 ;
  client_max_body_size 512M;

  # Portainer UI, served under /portainer with the prefix stripped.
  location /portainer {
    rewrite ^/portainer(/.*)$ /$1 break;
    proxy_pass http://portainer:9000/;
    proxy_http_version 1.1;
    proxy_set_header Connection "";
  }

  # Portainer API; upgrade headers allow its websocket connections.
  location /portainer/api {
    proxy_set_header Upgrade $http_upgrade;
    proxy_pass http://portainer:9000/api;
    proxy_set_header Connection 'upgrade';
    proxy_http_version 1.1;
  }

  # Everything else goes to the backend service.
  location / {
    proxy_pass http://backend-service;
    proxy_cache off;
    # access_log off;
    # error_log off;
    error_page 502 /backend-not-up.html;
  }

  # Friendly placeholder page shown while the backend container is starting.
  # NOTE(review): 418 is a non-standard status code — presumably chosen so the
  # body is rendered rather than a browser error page; confirm with HT team.
  location /backend-not-up.html {
    default_type text/html;
    return 418 "<b>Please wait while backend starts.
    <br>If this error persists for more than 5 minutes, please restart nginx container by ssh into HyperTest VM.
    <br>If it still does not work, contact HyperTest.";
  }

}

# Traffic-mirroring entry point (published on the host as HOST_LOGGER_PORT).
server {
  listen 3000;
  client_max_body_size 100M;
  location / {
    proxy_pass http://logger-service;
    proxy_cache off;
    # access_log off;
    # error_log off;
  }
}


# Consul UI/API entry point (published on the host as HOST_CONSUL_HTTP_PORT).
server {
  listen 8500;
  client_max_body_size 100M;
  location / {
  proxy_pass http://consul-service;
  proxy_cache off;
    # access_log off;
    # error_log off;
  }
}

Create a file named otel-collector.yml with below content

otel-collector.yml
# OpenTelemetry Collector configuration: accepts OTLP traces and forwards
# them to Jaeger (UI) and the HyperTest logger service.
receivers:
  otlp:
    protocols:
      http:
      grpc:

processors:
  # batch spans before export to reduce request volume
  batch:

exporters:
  # NOTE(review): declared but not referenced in the pipeline below; the
  # collector allows unused exporters. `loglevel` is deprecated in newer
  # collector releases — fine for the pinned 0.82.0 image.
  logging:
    loglevel: debug

  # NOTE(review): the dedicated jaeger exporter was removed in later collector
  # versions; works with the pinned 0.82.0 image — confirm before upgrading.
  jaeger:
    endpoint: jaeger-all-in-one:14250
    tls:
      insecure: true
  # Forwards traces to the HyperTest logger service over OTLP.
  otlp:
    endpoint: logger:3000
    compression: none
    tls:
      insecure: true

# https://github.com/open-telemetry/opentelemetry-collector/blob/main/extension/README.md
extensions:
  # responsible for responding to health check calls on behalf of the collector.
  health_check:
  # fetches the collector’s performance data
  pprof:
  # serves as an http endpoint that provides live debugging data about instrumented components.
  zpages:

service:
  extensions: [health_check, pprof, zpages]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [jaeger, otlp]

Create a file named .env with below content

nano .env
.env
# Host port mapped to the backend container (dashboard); container listens on 6010.
HOST_BACKEND_PORT=9001
# Host port mapped to the logger container (mirrored traffic); container listens on 3000.
HOST_LOGGER_PORT=9000
# Host port mapped to Consul's HTTP UI/API; container listens on 8500.
HOST_CONSUL_HTTP_PORT=9500
# Host port for the OpenTelemetry Collector's OTLP gRPC receiver.
HOST_COLLECTOR_GRPC_PORT=4317
# HyperTest image tag; replace 0.0.0 with the version provided by the HT team.
HYPERTEST_VERSION=0.0.0

The following are the details of the environment variables. These host ports should be opened on your VM

  • HOST_BACKEND_PORT: 9001 - Host port mapped to container port 6010; the HyperTest Dashboard will be accessible to users on this port

  • HOST_LOGGER_PORT: 9000 - Host port mapped to container port 3000; used for mirroring traffic. You have to allow incoming traffic to HyperTest on this port from your application

  • HOST_CONSUL_HTTP_PORT: 9500 - Host port mapped to container port 8500; used to expose Consul

  • HYPERTEST_VERSION: 0.1.3 - Version of HyperTest (Get the latest from HT team)

  • HOST_COLLECTOR_GRPC_PORT: 4317 - Host port for the OpenTelemetry Collector gRPC receiver

Bring up the services by the following command

docker compose up -d

Verify the containers are up and running by docker ps

HT Dashboard will be accessible on http://<hypertest-vm-ip/domain>:<HOST_BACKEND_PORT> (9001 by default)

Last updated