Exploring Web, API, Python, and AWS in the Development Environment

Web

WSGI vs ASGI

WSGI (Web Server Gateway Interface) is the traditional synchronous standard for Python web applications, while ASGI (Asynchronous Server Gateway Interface) is the modern async-capable successor.

# WSGI application (synchronous)
def wsgi_app(environ, start_response):
    status = '200 OK'
    headers = [('Content-Type', 'text/plain')]
    start_response(status, headers)
    return [b'Hello from WSGI']

# ASGI application (asynchronous)
async def asgi_app(scope, receive, send):
    await send({
        'type': 'http.response.start',
        'status': 200,
        'headers': [[b'content-type', b'text/plain']],
    })
    await send({
        'type': 'http.response.body',
        'body': b'Hello from ASGI',
    })

https://www.fullstackpython.com/wsgi-servers.html

Django Deployment Patterns

Django applications can be deployed in several ways; the most common production setup runs Gunicorn behind Nginx as a reverse proxy.

# settings.py for production
ALLOWED_HOSTS = ['example.com', 'www.example.com']
STATIC_ROOT = '/var/www/static/'
MEDIA_ROOT = '/var/www/media/'

# Persistent database connections (NAME, USER, etc. omitted for brevity)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'CONN_MAX_AGE': 600,  # keep connections open for up to 10 minutes
        'OPTIONS': {'connect_timeout': 10},
    }
}
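
Gunicorn itself is configured in Python. A minimal gunicorn.conf.py sketch for this setup, assuming Nginx proxies to a local port (the bind address, worker count, and log paths are illustrative):

# gunicorn.conf.py -- illustrative values; tune per host
import multiprocessing

bind = "127.0.0.1:8000"                        # Nginx proxies requests here
workers = multiprocessing.cpu_count() * 2 + 1  # common starting point
timeout = 30
accesslog = "/var/log/gunicorn/access.log"
errorlog = "/var/log/gunicorn/error.log"

Gunicorn is then pointed at the project's WSGI module, e.g. gunicorn myproject.wsgi -c gunicorn.conf.py (the module name is project-specific).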

https://docs.djangoproject.com/en/2.2/

FastAPI for Modern APIs

FastAPI provides automatic API documentation, type validation, and async support out of the box.

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

app = FastAPI()

class Item(BaseModel):
    name: str
    price: float
    is_available: bool = True

# Simple in-memory store keyed by item id
item_db: dict[int, Item] = {}

@app.post("/items/")
async def create_item(item: Item):
    item_id = len(item_db) + 1
    item_db[item_id] = item
    return {"id": item_id, "item": item, "status": "created"}

@app.get("/items/{item_id}")
async def read_item(item_id: int):
    if item_id not in item_db:
        raise HTTPException(status_code=404, detail="Item not found")
    return item_db[item_id]
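
For local development, the app can be served with Uvicorn; a small sketch assuming the code above is saved as main.py and uvicorn is installed:

# run.py -- starts the FastAPI app above; assumes it lives in main.py
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)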

API

GraphQL with Python

GraphQL provides a typed query language for APIs. Python has two major implementations: Strawberry (modern, type-hint based) and Graphene (mature, class-based).

# Strawberry example (modern approach)
import strawberry
from typing import List

@strawberry.type
class Book:
    title: str
    author: str
    year: int

@strawberry.type
class Query:
    @strawberry.field
    def books(self) -> List[Book]:
        return [
            Book(title="1984", author="George Orwell", year=1949),
            Book(title="Brave New World", author="Aldous Huxley", year=1932),
        ]

schema = strawberry.Schema(query=Query)
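
The schema can also be queried directly, which is handy for a quick check before attaching it to an HTTP view; a minimal sketch using Strawberry's execute_sync:

# Run a query against the schema without an HTTP server
result = schema.execute_sync("{ books { title author } }")
print(result.errors)  # None on success
print(result.data)    # {'books': [{'title': '1984', ...}, ...]}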

https://github.com/prisma/graphql-config/blob/master/specification.md
https://graphene-python.org/

REST Best Practices

RESTful APIs should follow standard HTTP methods and status codes, implement proper versioning, and use pagination for large datasets.

from fastapi import FastAPI, Query
from typing import Optional

app = FastAPI()

# Placeholder data-access helpers; a real app would query the database
def get_users(offset: int, limit: int, role: Optional[str]) -> list:
    return []

def get_total_users(role: Optional[str]) -> int:
    return 0

@app.get("/api/v1/users")
async def list_users(
    page: int = Query(1, ge=1),
    page_size: int = Query(50, ge=1, le=100),
    filter_role: Optional[str] = None
):
    offset = (page - 1) * page_size
    users = get_users(offset, page_size, filter_role)
    return {
        "data": users,
        "pagination": {
            "page": page,
            "page_size": page_size,
            "total": get_total_users(filter_role)
        }
    }

Python

AWS SDK (boto3) Patterns

boto3 is the AWS SDK for Python. Use resource-level APIs for high-level operations and client-level APIs for low-level control.

import boto3
from botocore.exceptions import ClientError

# High-level resource API
s3 = boto3.resource('s3')
bucket = s3.Bucket('my-bucket')

# Upload file with metadata
bucket.upload_file(
    '/tmp/data.csv',
    'uploads/data.csv',
    ExtraArgs={'Metadata': {'uploaded-by': 'pipeline'}}
)

# Low-level client API for fine-grained control
s3_client = boto3.client('s3')
try:
    response = s3_client.get_object(Bucket='my-bucket', Key='data.csv')
    data = response['Body'].read()
except ClientError as e:
    if e.response['Error']['Code'] == 'NoSuchKey':
        print("File not found")
    else:
        raise  # re-raise anything other than a missing key

Lambda with Python

AWS Lambda functions should be stateless, use environment variables for configuration, and implement proper error handling.

import json
import os
import boto3

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])

def lambda_handler(event, context):
    try:
        # Parse input
        body = json.loads(event['body'])
        item_id = body['id']

        # Process
        response = table.get_item(Key={'id': item_id})

        # Return success
        return {
            'statusCode': 200,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps(response.get('Item', {}))
        }
    except Exception as e:
        return {
            'statusCode': 500,
            'body': json.dumps({'error': str(e)})
        }
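
For a quick local smoke test, the handler can be called directly with a hand-built event in the same module; this assumes TABLE_NAME is set, AWS credentials with read access to the table are available, and the item id is a placeholder:

# Local smoke test: call the handler with a minimal API Gateway-style event
test_event = {'body': json.dumps({'id': 'abc-123'})}  # placeholder id
print(lambda_handler(test_event, None))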

AWS

Terraform for AWS Infrastructure

Terraform enables infrastructure as code with state management and dependency resolution.

# main.tf
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

resource "aws_lambda_function" "api_handler" {
  filename         = "lambda.zip"
  function_name    = "api-handler"
  role            = aws_iam_role.lambda_role.arn
  handler         = "main.lambda_handler"
  runtime         = "python3.11"
  timeout         = 30
  memory_size     = 512

  environment {
    variables = {
      TABLE_NAME = aws_dynamodb_table.main.name
    }
  }
}
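
Once the stack is applied, the deployed function can be smoke-tested from Python; a sketch using boto3, where the function name matches the Terraform resource above and the payload shape assumes the handler from the previous section:

import json
import boto3

lambda_client = boto3.client('lambda')

# Synchronously invoke the function and print its response payload
response = lambda_client.invoke(
    FunctionName='api-handler',
    Payload=json.dumps({'body': json.dumps({'id': 'abc-123'})}),  # placeholder id
)
print(json.loads(response['Payload'].read()))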

https://www.terraform.io/docs/index.html

Fargate Deployment Patterns

AWS Fargate enables serverless container deployment without managing EC2 instances.

# Task definition for Python API
{
  "family": "python-api",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "256",
  "memory": "512",
  "containerDefinitions": [
    {
      "name": "api",
      "image": "123456789.dkr.ecr.us-east-1.amazonaws.com/python-api:latest",
      "portMappings": [{"containerPort": 8000, "protocol": "tcp"}],
      "environment": [
        {"name": "DATABASE_URL", "value": "postgresql://..."}
      ],
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "/ecs/python-api",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "ecs"
        }
      }
    }
  ]
}
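
A registered task definition can also be launched from Python; a sketch using boto3's run_task, where the cluster name, subnet, and security group IDs are placeholders:

import boto3

ecs = boto3.client('ecs')

# Launch a one-off Fargate task from the definition above
response = ecs.run_task(
    cluster='my-cluster',                                  # placeholder
    launchType='FARGATE',
    taskDefinition='python-api',
    count=1,
    networkConfiguration={
        'awsvpcConfiguration': {
            'subnets': ['subnet-0123456789abcdef0'],       # placeholder
            'securityGroups': ['sg-0123456789abcdef0'],    # placeholder
            'assignPublicIp': 'ENABLED',
        }
    },
)
print(response['tasks'][0]['taskArn'])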

https://aws.amazon.com/blogs/aws/aws-fargate/

Cost Optimization

Implement cost-saving strategies like S3 lifecycle policies, right-sizing instances, and using spot instances for non-critical workloads.

import boto3

s3_client = boto3.client('s3')

# Transition logs to cheaper storage classes over time, then expire them
lifecycle_policy = {
    'Rules': [
        {
            'ID': 'Archive old logs',
            'Status': 'Enabled',
            'Filter': {'Prefix': 'logs/'},
            'Transitions': [
                {'Days': 30, 'StorageClass': 'STANDARD_IA'},
                {'Days': 90, 'StorageClass': 'GLACIER'}
            ],
            'Expiration': {'Days': 365}
        }
    ]
}

s3_client.put_bucket_lifecycle_configuration(
    Bucket='my-bucket',
    LifecycleConfiguration=lifecycle_policy
)
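
Right-sizing and spot decisions start with visibility into where spend goes; a sketch using the Cost Explorer API, where the date range is a placeholder and Cost Explorer must be enabled on the account:

import boto3

ce = boto3.client('ce')

# Monthly unblended cost, grouped by service
response = ce.get_cost_and_usage(
    TimePeriod={'Start': '2025-01-01', 'End': '2025-04-01'},  # placeholder range
    Granularity='MONTHLY',
    Metrics=['UnblendedCost'],
    GroupBy=[{'Type': 'DIMENSION', 'Key': 'SERVICE'}],
)
for period in response['ResultsByTime']:
    for group in period['Groups']:
        service = group['Keys'][0]
        amount = group['Metrics']['UnblendedCost']['Amount']
        print(period['TimePeriod']['Start'], service, amount)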

Author: Jason Walsh

j@wal.sh

Last Updated: 2025-12-22 23:10:59
