HTTP 100 Continue is an interim response indicating the server has received the request headers and the client should proceed to send the request body. This status is crucial for optimizing large payload uploads — the client sends an Expect: 100-continue header, and the server either replies 100 to accept the body or 417 to reject it early, saving bandwidth. Without this mechanism, clients would blindly transmit potentially megabytes of data only to receive a 401 or 413 after the full upload completes.
Response includes the status code, standard headers (including Content-Type), and a small diagnostic JSON body describing the request and returned status.
Simulator URL (copy in the app after load — not a normal link):
https://httpstatus.com/api/status/100
Example request:
curl -i "https://httpstatus.com/api/status/100"

The server has received the request headers, and the client should proceed to send the request body (used with the Expect: 100-continue header).
For this code, the Inspector focuses on semantics, headers, and correctness warnings that commonly affect clients and caches.
The 100 Continue mechanism was defined in RFC 7231 Section 5.1.1 (now RFC 9110 Section 10.1.1). When a client includes 'Expect: 100-continue' in the request headers, it pauses before sending the body. The server inspects the headers (authentication, content-length limits, content-type) and either sends '100 Continue' to greenlight the body, or a final status like 413 Content Too Large. Key detail: the server MUST eventually send a final response regardless of whether it sent 100. Proxies must handle 100 specially — they forward it but must not cache it. HTTP/2 transports 1xx responses differently (as separate HEADERS frames), but the Expect: 100-continue semantics carry over. Timeout behavior varies: a client that receives no interim response typically waits a short period (often about a second) and then sends the body anyway, as RFC 9110 permits.
const http = require('http');
// Maximum request body size we are willing to accept, in bytes.
const MAX_BODY_BYTES = 10_000_000;

const server = http.createServer();

// 'checkContinue' fires when a request arrives carrying `Expect: 100-continue`.
// We vet the declared Content-Length BEFORE the client transmits the body:
// either reject early with 413, or greenlight the upload with 100 Continue.
server.on('checkContinue', (req, res) => {
  // Explicit radix (was missing). A missing/invalid header yields NaN,
  // which fails the `>` comparison, so the request falls through to
  // writeContinue() — same behavior as before for absent headers.
  const declaredLength = Number.parseInt(req.headers['content-length'], 10);
  if (declaredLength > MAX_BODY_BYTES) {
    res.writeHead(413, { 'Content-Type': 'application/json' });
    return res.end(JSON.stringify({ error: 'Payload too large' }));
  }

  // Tell the client to proceed with the body, then consume it normally.
  res.writeContinue();
  let body = '';
  req.on('data', (chunk) => { body += chunk; });
  req.on('end', () => {
    res.writeHead(200).end('OK');
  });
});

// --- aiohttp (Python) example follows ---
from aiohttp import web


async def upload_handler(request):
    """Accept an upload, rejecting oversized bodies before reading them.

    aiohttp negotiates Expect: 100-continue automatically via its
    expect_handler, so by the time ``request.read()`` runs the interim
    response has already been handled.
    """
    # content_length is None when the header is absent; treat that as 0.
    declared = request.content_length or 0
    if declared > 10_000_000:
        raise web.HTTPRequestEntityTooLarge(
            max_size=10_000_000,
            actual_size=declared,
        )
    payload = await request.read()
    return web.Response(text=f'Received {len(payload)} bytes')


app = web.Application()
app.router.add_post('/upload', upload_handler)

# --- Spring Boot (Java) example follows ---
// Spring Boot filter: reject oversized Expect: 100-continue uploads
// before the client transmits the request body.
@Component
public class ContinueFilter extends OncePerRequestFilter {

    /** Largest request body, in bytes, accepted before sending 100 Continue. */
    private static final long MAX_BODY_BYTES = 10_000_000L;

    @Override
    protected void doFilterInternal(
        HttpServletRequest req, HttpServletResponse res,
        FilterChain chain) throws ServletException, IOException {
        // Only short-circuit when the client is actually waiting on a
        // 100-continue verdict before sending its body.
        String expectHeader = req.getHeader("Expect");
        boolean expectsContinue = "100-continue".equalsIgnoreCase(expectHeader);
        if (expectsContinue && req.getContentLengthLong() > MAX_BODY_BYTES) {
            res.sendError(413, "Payload too large");
            return;
        }
        chain.doFilter(req, res);
    }
}

package main
import (
"fmt"
"io"
"net/http"
)
func uploadHandler(w http.ResponseWriter, r *http.Request) {
if r.ContentLength > 10_000_000 {
http.Error(w, "Payload too large", http.StatusRequestEntityTooLarge)
return
}
// Go's net/http automatically sends 100 Continue
// when the handler reads the body
body, _ := io.ReadAll(r.Body)
fmt.Fprintf(w, "Received %d bytes", len(body))
}