Concurrency in Go

Go was designed with concurrency in mind. Its key primitives are goroutines (lightweight threads) and channels (typed communication pipes).

The Go Concurrency Philosophy

"Don't communicate by sharing memory; share memory by communicating."

Instead of locking shared data, pass data between goroutines through channels.

Goroutines

A goroutine is a lightweight thread managed by the Go runtime. They're cheap — each starts with only a few kilobytes of stack that grows as needed — so you can run hundreds of thousands simultaneously.

Starting a Goroutine

// Add 'go' before a function call
go doSomething()

// With anonymous function
go func() {
    fmt.Println("Running in goroutine")
}()

// With parameters
go func(msg string) {
    fmt.Println(msg)
}("hello")

Basic Example

func main() {
    go sayHello()       // Start goroutine
    time.Sleep(100 * time.Millisecond)  // Wait for it to finish
    fmt.Println("Main done")
}

func sayHello() {
    fmt.Println("Hello from goroutine!")
}

Problem: Using time.Sleep to wait is unreliable — the goroutine may need more time than you slept, or you may wait far longer than necessary. Use proper synchronization instead, such as a sync.WaitGroup or a channel (both covered below).

Channels

Channels are typed conduits for communication between goroutines.

Creating Channels

// Unbuffered channel
ch := make(chan int)

// Buffered channel (capacity 10)
ch := make(chan int, 10)

// Directional channels (for function parameters)
func send(ch chan<- int) { ch <- 42 }     // Send-only
func receive(ch <-chan int) { <-ch }       // Receive-only

Send and Receive

ch := make(chan int)

// Send (blocks until received)
ch <- 42

// Receive (blocks until value sent)
value := <-ch

// Receive and discard
<-ch

Basic Channel Example

func main() {
    ch := make(chan string)

    go func() {
        ch <- "Hello from goroutine!"  // Send
    }()

    msg := <-ch  // Receive (blocks until message arrives)
    fmt.Println(msg)
}

Buffered vs Unbuffered

// Unbuffered: sender blocks until receiver is ready
ch := make(chan int)

// Buffered: sender only blocks when buffer is full
ch := make(chan int, 3)
ch <- 1  // Doesn't block
ch <- 2  // Doesn't block
ch <- 3  // Doesn't block
ch <- 4  // Buffer is full: blocks until a receiver takes a value (with no other goroutine running, this deadlocks)

Closing Channels

ch := make(chan int)

close(ch)  // Signal no more values will be sent

// Receiving from a closed channel never blocks: it immediately yields the
// element type's zero value. Use the two-value form to detect closure:
value, ok := <-ch
if !ok {
    fmt.Println("Channel closed")
}

// Range over channel (stops when closed)
for value := range ch {
    fmt.Println(value)
}

Important: Only senders should close channels — never close from the receiver side, and never send on a closed channel (both cause a panic). Closing is also optional: close a channel only when receivers need to know that no more values are coming, e.g. to terminate a range loop. Channels that simply fall out of use are garbage-collected without being closed.

Common Patterns

Worker Pool

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    // Start 3 workers
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }

    // Send 5 jobs
    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)

    // Collect results
    for a := 1; a <= 5; a++ {
        <-results
    }
}

func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        fmt.Printf("Worker %d processing job %d\n", id, j)
        time.Sleep(time.Second)
        results <- j * 2
    }
}

Fan-Out, Fan-In

// Fan-out: multiple goroutines reading from same channel
func fanOut(input <-chan int, workers int) []<-chan int {
    channels := make([]<-chan int, workers)
    for i := 0; i < workers; i++ {
        channels[i] = process(input)
    }
    return channels
}

// Fan-in: merge multiple channels into one
func fanIn(channels ...<-chan int) <-chan int {
    out := make(chan int)
    var wg sync.WaitGroup

    for _, ch := range channels {
        wg.Add(1)
        go func(c <-chan int) {
            defer wg.Done()
            for v := range c {
                out <- v
            }
        }(ch)
    }

    go func() {
        wg.Wait()
        close(out)
    }()

    return out
}

Pipeline

// Stage 1: Generate numbers
func generate(nums ...int) <-chan int {
    out := make(chan int)
    go func() {
        for _, n := range nums {
            out <- n
        }
        close(out)
    }()
    return out
}

// Stage 2: Square numbers
func square(in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        for n := range in {
            out <- n * n
        }
        close(out)
    }()
    return out
}

// Usage
func main() {
    c := generate(2, 3, 4)
    out := square(c)

    for n := range out {
        fmt.Println(n)  // 4, 9, 16
    }
}

Select Statement

select lets you wait on multiple channel operations at once. It blocks until one of its cases can proceed; if several are ready at the same time, it picks one at random:

select {
case msg := <-ch1:
    fmt.Println("Received from ch1:", msg)
case msg := <-ch2:
    fmt.Println("Received from ch2:", msg)
case ch3 <- value:
    fmt.Println("Sent to ch3")
default:
    fmt.Println("No communication ready")
}

Timeout Pattern

select {
case result := <-ch:
    fmt.Println("Got result:", result)
case <-time.After(3 * time.Second):
    fmt.Println("Timeout!")
}

Non-Blocking Operations

select {
case msg := <-ch:
    fmt.Println("Received:", msg)
default:
    fmt.Println("No message available")
}

Done Channel Pattern

func worker(done <-chan struct{}, tasks <-chan int) {
    for {
        select {
        case <-done:
            fmt.Println("Worker stopping")
            return
        case task := <-tasks:
            fmt.Println("Processing:", task)
        }
    }
}

func main() {
    done := make(chan struct{})
    tasks := make(chan int)

    go worker(done, tasks)

    tasks <- 1
    tasks <- 2

    close(done)  // Signal worker to stop
}

sync Package

WaitGroup

Wait for multiple goroutines to complete:

import "sync"

func main() {
    var wg sync.WaitGroup

    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            fmt.Printf("Worker %d done\n", n)
        }(i)
    }

    wg.Wait()  // Block until all Done() calls
    fmt.Println("All workers complete")
}

Mutex

Protect shared data with mutual exclusion:

type Counter struct {
    mu    sync.Mutex
    value int
}

func (c *Counter) Increment() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.value++
}

func (c *Counter) Value() int {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.value
}

RWMutex

RWMutex allows any number of concurrent readers (RLock) but only one writer (Lock) at a time, making it a good fit for read-heavy workloads:

type Cache struct {
    mu   sync.RWMutex
    data map[string]string
}

func (c *Cache) Get(key string) (string, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    val, ok := c.data[key]
    return val, ok
}

func (c *Cache) Set(key, value string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.data[key] = value
}

Once

Run initialization exactly once:

var (
    instance *Config
    once     sync.Once
)

func GetConfig() *Config {
    once.Do(func() {
        instance = loadConfig()
    })
    return instance
}

Pool

Reuse objects to reduce allocations:

var bufferPool = sync.Pool{
    New: func() interface{} {
        return new(bytes.Buffer)
    },
}

func process() {
    buf := bufferPool.Get().(*bytes.Buffer)
    defer func() {
        buf.Reset()
        bufferPool.Put(buf)
    }()

    // Use buf...
}

Context Package

For cancellation, deadlines, and request-scoped values:

import "context"

// With cancellation
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// With timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// With deadline
deadline := time.Now().Add(5 * time.Second)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
defer cancel()

// Check if done
select {
case <-ctx.Done():
    fmt.Println("Context cancelled:", ctx.Err())
    return
default:
    // Continue working
}

Context in Functions

func fetchData(ctx context.Context, url string) ([]byte, error) {
    req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
    if err != nil {
        return nil, err
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    return io.ReadAll(resp.Body)
}

// Usage
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

data, err := fetchData(ctx, "https://api.example.com/data")
if errors.Is(err, context.DeadlineExceeded) {
    fmt.Println("Request timed out")
}

Common Pitfalls

Goroutine Leaks

// BAD: Goroutine leaks if nobody receives
func bad() {
    ch := make(chan int)
    go func() {
        ch <- expensiveOperation()  // Blocked forever
    }()
    // Function returns without receiving
}

// GOOD: Use context for cancellation
func good(ctx context.Context) {
    ch := make(chan int)
    go func() {
        select {
        case ch <- expensiveOperation():
        case <-ctx.Done():
            return  // Clean exit
        }
    }()
}

Race Conditions

// BAD: Data race
counter := 0
for i := 0; i < 1000; i++ {
    go func() {
        counter++  // Race condition!
    }()
}

// GOOD: Use mutex or atomic
var counter int64
for i := 0; i < 1000; i++ {
    go func() {
        atomic.AddInt64(&counter, 1)
    }()
}

// Detect races: go run -race main.go

Closure Variable Capture

// BAD: All goroutines share same 'i'
for i := 0; i < 5; i++ {
    go func() {
        fmt.Println(i)  // Probably prints 5, 5, 5, 5, 5
    }()
}

// GOOD: Pass as parameter
for i := 0; i < 5; i++ {
    go func(n int) {
        fmt.Println(n)  // Prints 0, 1, 2, 3, 4 (unordered)
    }(i)
}

// GOOD in Go 1.22+: Loop variables are per-iteration
for i := 0; i < 5; i++ {
    go func() {
        fmt.Println(i)  // Works correctly
    }()
}

Atomic Operations

For simple counters, atomic operations are faster than mutexes:

import "sync/atomic"

var counter int64

atomic.AddInt64(&counter, 1)      // Increment
atomic.LoadInt64(&counter)         // Read
atomic.StoreInt64(&counter, 0)     // Write
atomic.SwapInt64(&counter, 10)     // Swap, returns old value
atomic.CompareAndSwapInt64(&counter, 10, 20)  // CAS

Next Steps

Continue to 10-packages-modules.md to learn about organizing Go code.