// Copyright 2022 Fastly, Inc.

package main

import (
	"context"
	"io"
	"log"
	"os"
	"sync"
	"time"

	"github.com/fastly/compute-sdk-go/fsthttp"
)

// BACKENDS names the backends configured on the Fastly service; requests
// below are sent to them by name.
var BACKENDS = []string{"origin_0", "origin_1"}
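
// For local testing with `fastly compute serve`, backends like these might be
// declared in fastly.toml. A minimal sketch, with assumed URLs that are not
// part of this example:
//
//	[local_server.backends.origin_0]
//	url = "https://http-me.glitch.me"
//
//	[local_server.backends.origin_1]
//	url = "https://http-me.glitch.me"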

func main() {
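	// ServeFunc handles the incoming client request with the provided handler.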
	fsthttp.ServeFunc(func(ctx context.Context, w fsthttp.ResponseWriter, r *fsthttp.Request) {
		// Log to the console (view with `fastly log-tail`) and to the client
		// response body.
		log := log.New(io.MultiWriter(os.Stdout, w), "", log.Ltime)
		log.Printf("Starting")
		begin := time.Now()
		var responses []string

		// wg waits for all request goroutines; mu guards the shared responses
		// slice, which they append to concurrently.
		var wg sync.WaitGroup
		var mu sync.Mutex
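		// Each URL uses http-me's `wait` query parameter to delay its
		// response, simulating a slow backend.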
		for i, url := range []string{
			"https://http-me.glitch.me/body=cat?wait=3000", // delay 3s
			"https://http-me.glitch.me/body=dog?wait=2000", // delay 2s
		} {
			wg.Add(1)
			// Pass i and url as arguments so each goroutine works on its own
			// copies of the loop variables.
			go func(i int, url string) {
				defer wg.Done()
				log.Printf("Starting %s", url)

				req, err := fsthttp.NewRequest(fsthttp.MethodGet, url, nil)
				if err != nil {
					log.Printf("%s: create request: %v", url, err)
					return
				}
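				// Pass bypasses the Fastly cache, so the request always goes
				// to the backend.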
				req.CacheOptions.Pass = true

				// Sending HTTP requests from separate goroutines lets them be
				// in flight at the same time: while one goroutine waits for
				// its response, the others make progress. For example, two
				// requests that each take 2s to return a response complete in
				// about 2s total rather than 4s.
				resp, err := req.Send(ctx, BACKENDS[i])
				if err != nil {
					log.Printf("%s: send request: %v", url, err)
					return
				}

				body, err := io.ReadAll(resp.Body)
				if err != nil {
					log.Printf("%s: read body: %v", url, err)
					return
				}

				mu.Lock()
				responses = append(responses, string(body))
				mu.Unlock()

				log.Printf("Finished %s", url)
			}(i, url)
		}
		wg.Wait()

		// All requests should finish in about as long as the longest individual
		// request took. That is, about 3s, rather than 2s+3s=5s.
		log.Printf("Finished after %s", time.Since(begin))
		// responses holds the bodies in completion order, which may differ
		// from the order the requests were issued.
		for i, r := range responses {
			log.Printf("response %d: %s", i, r)
		}
	})
}