From 048e0961e8b684243abf358738ec05f16d889ff3 Mon Sep 17 00:00:00 2001 From: sarahxsanders Date: Sun, 7 Dec 2025 14:13:40 -0500 Subject: [PATCH 1/2] docs: add error and schema management guides --- website/pages/docs/_meta.ts | 2 + .../pages/docs/production-errors-debug.mdx | 564 ++++++++++++++++ .../docs/production-schema-management.mdx | 625 ++++++++++++++++++ 3 files changed, 1191 insertions(+) create mode 100644 website/pages/docs/production-errors-debug.mdx create mode 100644 website/pages/docs/production-schema-management.mdx diff --git a/website/pages/docs/_meta.ts b/website/pages/docs/_meta.ts index 97c5bc2b2e..7493e4b32a 100644 --- a/website/pages/docs/_meta.ts +++ b/website/pages/docs/_meta.ts @@ -42,6 +42,8 @@ const meta = { title: 'FAQ', }, 'going-to-production': '', + 'production-errors-debug': '', + 'production-schema-management': '', 'scaling-graphql': '', }; diff --git a/website/pages/docs/production-errors-debug.mdx b/website/pages/docs/production-errors-debug.mdx new file mode 100644 index 0000000000..56bdd577d0 --- /dev/null +++ b/website/pages/docs/production-errors-debug.mdx @@ -0,0 +1,564 @@ +--- +title: Handle Errors and Debug GraphQL +description: Format errors safely for clients, implement custom error types, debug production issues, and build error recovery strategies for GraphQL.js applications. +--- + +# Handle Errors and Debug GraphQL + +Production GraphQL servers need robust error handling that protects sensitive information while providing developers with the context they need to diagnose issues. GraphQL's structured error format gives you control over what clients see and what gets logged internally. + +This guide shows you how to format errors safely, create custom error types, debug production problems, and implement recovery strategies for GraphQL.js applications. + +## Format errors for production + +GraphQL errors contain details that help during development but can expose sensitive information in production. 
Control what clients receive by formatting errors before they leave your server. + +### Remove sensitive error details + +Strip stack traces and internal messages from errors sent to clients. + +```javascript +import { graphql } from 'graphql'; + +export async function executeGraphQLRequest(schema, source, contextValue) { + const result = await graphql({ schema, source, contextValue }); + + if (result.errors) { + result.errors = result.errors.map(error => ({ + message: error.message, + locations: error.locations, + path: error.path, + extensions: { + code: error.extensions?.code || 'INTERNAL_SERVER_ERROR' + } + })); + } + + return result; +} +``` + +This example transforms errors to include only safe fields. Stack traces, original error objects, and internal details get filtered out before the response reaches the client. + +To adapt this pattern: + +- Add a `formatError` function to your GraphQL configuration +- Include extension codes that help clients handle different error types without exposing implementation details +- Log the full error internally before formatting it for the client +- Consider environment-specific formatting that shows more details in development + +### Categorize errors with codes + +Use error codes to help clients distinguish between different error types without revealing system internals. 
+ +```javascript +export class GraphQLError extends Error { + constructor(message, code, extensions = {}) { + super(message); + this.extensions = { + code, + ...extensions + }; + } +} + +export class AuthenticationError extends GraphQLError { + constructor(message = 'Authentication required') { + super(message, 'UNAUTHENTICATED'); + } +} + +export class ForbiddenError extends GraphQLError { + constructor(message = 'Forbidden') { + super(message, 'FORBIDDEN'); + } +} + +export class ValidationError extends GraphQLError { + constructor(message, fields = []) { + super(message, 'BAD_USER_INPUT', { fields }); + } +} +``` + +These custom error classes set standardized codes that clients can handle programmatically. The extensions object carries additional context like which fields failed validation. + +To implement custom errors: + +- Throw them from your resolvers when specific conditions occur +- Document error codes in your API documentation so client developers know what to expect +- Use consistent codes across your entire API +- Add fields to extensions that help clients recover from errors without exposing sensitive data + +### Log errors with context + +Capture detailed error information for debugging while keeping client responses clean. + +```javascript +import { logger } from './logger.js'; + +export function formatError(error, context) { + logger.error('graphql_error', { + message: error.message, + code: error.extensions?.code, + path: error.path, + operationName: context.operationName, + userId: context.userId, + stackTrace: error.originalError?.stack, + timestamp: new Date().toISOString() + }); + + return { + message: error.message, + locations: error.locations, + path: error.path, + extensions: { + code: error.extensions?.code || 'INTERNAL_SERVER_ERROR' + } + }; +} +``` + +This formatter logs comprehensive error details internally while returning a sanitized version to clients. The log includes stack traces, user context, and operation details. 
+ +When implementing error logging: + +- Include request IDs to correlate errors with other logs +- Add structured fields that your monitoring system can index and alert on +- Sanitize logged data to avoid capturing passwords or tokens +- Rate limit error logs for specific error types to prevent log flooding during incidents + +## Create custom error types + +Custom errors communicate specific problems to clients and make resolver code more maintainable. + +### Handle validation errors + +Provide detailed feedback when client input fails validation. + +```javascript +export class InputValidationError extends GraphQLError { + constructor(message, invalidFields) { + super(message, 'BAD_USER_INPUT', { + invalidFields: invalidFields.map(field => ({ + field: field.name, + message: field.message + })) + }); + } +} + +// In resolver +const UserMutations = { + createUser: async (parent, args, context) => { + const errors = validateUserInput(args.input); + + if (errors.length > 0) { + throw new InputValidationError( + 'User input validation failed', + errors + ); + } + + return createUser(args.input); + } +}; +``` + +This error type structures validation failures in a way clients can use to display field-specific error messages. The extensions include which fields failed and why. + +To use validation errors effectively: + +- Validate all input before performing any operations +- Return all validation errors at once rather than failing on the first error +- Use consistent field names that match your input types +- Consider internationalization by providing error codes that clients can translate + +### Handle resource errors + +Communicate when requested resources don't exist or aren't accessible. 
+ +```javascript +export class NotFoundError extends GraphQLError { + constructor(resourceType, resourceId) { + super( + `${resourceType} not found`, + 'NOT_FOUND', + { resourceType, resourceId } + ); + } +} + +export class ConflictError extends GraphQLError { + constructor(message, conflictingResource) { + super(message, 'CONFLICT', { conflictingResource }); + } +} + +// In resolver +const Query = { + user: async (parent, args, context) => { + const user = await fetchUser(args.id); + + if (!user) { + throw new NotFoundError('User', args.id); + } + + if (!canAccessUser(context.userId, user.id)) { + throw new ForbiddenError('Cannot access this user'); + } + + return user; + } +}; +``` + +These error types distinguish between "not found" and "forbidden" scenarios. The extensions provide context about what resource was involved. + +When implementing resource errors: + +- Use NOT_FOUND for missing resources and FORBIDDEN when the resource exists but the user lacks access +- Include enough information in extensions to help clients display meaningful messages without exposing security details +- Consider using generic messages for forbidden resources to avoid confirming resource existence + +### Handle external service errors + +Wrap errors from external services to prevent internal details from leaking to clients. 
+ +```javascript +export class ExternalServiceError extends GraphQLError { + constructor(serviceName, originalError) { + super( + 'External service temporarily unavailable', + 'SERVICE_UNAVAILABLE', + { serviceName } + ); + + this.originalError = originalError; + } +} + +// In resolver +const Query = { + recommendation: async (parent, args, context) => { + try { + return await recommendationService.fetch(args.userId); + } catch (error) { + logger.error('recommendation_service_error', { + error: error.message, + stack: error.stack, + userId: args.userId + }); + + throw new ExternalServiceError('recommendation', error); + } + } +}; +``` + +This pattern catches errors from external services, logs the full details internally, and returns a generic error to clients. The service name helps clients understand which feature is unavailable without exposing implementation details. + +To handle external service errors: + +- Wrap all external calls in try-catch blocks +- Log the original error with full context before transforming it +- Consider implementing retry logic before throwing errors +- Add circuit breakers for services that frequently fail to prevent cascading failures + +## Debug production issues + +Production debugging requires techniques that don't rely on attaching debuggers or adding temporary logging. + +### Add request tracing + +Track requests through your system to understand execution flow. 
+ +```javascript +import { randomUUID } from 'crypto'; + +export function createContext(req) { + const requestId = req.headers['x-request-id'] || randomUUID(); + const startTime = Date.now(); + + return { + requestId, + startTime, + breadcrumbs: [], + + addBreadcrumb: function(category, message, data = {}) { + this.breadcrumbs.push({ + category, + message, + data, + timestamp: Date.now() - this.startTime, + requestId: this.requestId + }); + } + }; +} + +// In resolver +const Query = { + user: async (parent, args, context) => { + context.addBreadcrumb('database', 'Fetching user', { userId: args.id }); + + const user = await fetchUser(args.id); + + context.addBreadcrumb('database', 'User fetched', { + userId: args.id, + found: !!user + }); + + return user; + } +}; +``` + +This pattern creates a trail of breadcrumbs throughout request execution. Each breadcrumb records what happened and when, relative to the request start time. + +To implement request tracing: + +- Add breadcrumbs at key points in your resolvers +- Include breadcrumbs in error logs to see what happened before the error +- Keep breadcrumb data lightweight to avoid performance impact +- Send breadcrumbs to your error tracking service when errors occur +- Propagate request IDs to external services for distributed tracing + +### Enable debug mode safely + +Provide detailed error information in development without exposing it in production. 
+ +```javascript +const isDevelopment = process.env.NODE_ENV === 'development'; + +export function formatError(error, context) { + const formatted = { + message: error.message, + locations: error.locations, + path: error.path, + extensions: { + code: error.extensions?.code || 'INTERNAL_SERVER_ERROR' + } + }; + + if (isDevelopment) { + formatted.extensions.stackTrace = error.originalError?.stack; + formatted.extensions.breadcrumbs = context.breadcrumbs; + } + + return formatted; +} +``` + +This formatter includes stack traces and breadcrumbs in development but omits them in production. Developers get the context they need locally without risk of exposing it to production clients. + +When implementing debug modes: + +- Check environment variables rather than configuration files that might accidentally enable debug mode in production +- Consider adding a secure debug endpoint that requires authentication and can be enabled temporarily for production debugging +- Never include debug information in responses to unauthenticated requests + +### Capture error context + +Include relevant state information when errors occur to aid debugging. 
+ +```javascript +export function wrapResolver(resolver, typeName, fieldName) { + return async (parent, args, context, info) => { + try { + return await resolver(parent, args, context, info); + } catch (error) { + const errorContext = { + typeName, + fieldName, + args: sanitizeArgs(args), + userId: context.userId, + requestId: context.requestId, + parentType: parent?.constructor?.name, + breadcrumbs: context.breadcrumbs + }; + + logger.error('resolver_error', { + error: error.message, + stack: error.stack, + context: errorContext + }); + + throw error; + } + }; +} + +function sanitizeArgs(args) { + const sanitized = { ...args }; + const sensitiveFields = ['password', 'token', 'secret']; + + sensitiveFields.forEach(field => { + if (field in sanitized) { + sanitized[field] = '[REDACTED]'; + } + }); + + return sanitized; +} +``` + +This wrapper captures context about where and why an error occurred. The sanitization prevents sensitive data from appearing in logs. + +To capture useful error context: + +- Wrap all resolvers to add consistent error handling +- Include parent object types to understand resolver chains +- Add user information to correlate errors with specific users or usage patterns +- Sanitize all data before logging to prevent leaking credentials or personal information + +## Implement error recovery + +Build resilience into your GraphQL server so it can handle and recover from errors gracefully. + +### Handle partial failures + +Return successful data alongside errors instead of failing entire operations. 
+ +```javascript +const Query = { + users: async (parent, args, context) => { + const userIds = args.ids; + const users = []; + const errors = []; + + for (const id of userIds) { + try { + const user = await fetchUser(id); + users.push(user); + } catch (error) { + errors.push({ + message: `Failed to fetch user ${id}`, + extensions: { + code: 'USER_FETCH_FAILED', + userId: id + } + }); + users.push(null); + } + } + + return users; + } +}; +``` + +This resolver attempts to fetch all requested users and returns whatever succeeded. Errors for individual users get reported without failing the entire query. + +When implementing partial failure handling: + +- Decide per-field whether partial success makes sense +- Document which fields return partial results so clients know what to expect +- Consider returning error information in the data itself for better client-side handling +- Balance between failing fast and attempting recovery based on your use case + +### Add fallback data sources + +Provide degraded service when primary data sources fail. + +```javascript +export async function fetchUserWithFallback(userId, context) { + try { + return await primaryDatabase.fetchUser(userId); + } catch (primaryError) { + logger.warn('primary_database_unavailable', { + userId, + error: primaryError.message + }); + + try { + const cachedUser = await cache.get(`user:${userId}`); + if (cachedUser) { + return { ...cachedUser, stale: true }; + } + } catch (cacheError) { + logger.error('cache_unavailable', { + userId, + error: cacheError.message + }); + } + + throw new ExternalServiceError('database', primaryError); + } +} +``` + +This function tries the primary database first, falls back to cached data if the database is unavailable, and only throws an error if both fail. The returned data includes a flag indicating it came from cache. 
To implement fallback strategies:

- Identify which data can safely come from stale sources
- Add metadata to responses indicating when fallback data was used
- Set cache TTLs appropriate for your fallback tolerance
- Monitor fallback usage to detect infrastructure problems
- Consider returning partial data rather than failing completely

### Implement retry logic

Retry transient failures automatically before reporting errors to clients.

```javascript
import { logger } from './logger.js';

function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

export async function retryableOperation(operation, maxRetries = 3) {
  let lastError;

  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;

      if (!isRetryable(error)) {
        throw error;
      }

      const delay = Math.min(100 * Math.pow(2, attempt), 1000);
      await sleep(delay);

      logger.warn('operation_retry', {
        attempt: attempt + 1,
        maxRetries,
        error: error.message
      });
    }
  }

  throw lastError;
}

function isRetryable(error) {
  const retryableCodes = ['ETIMEDOUT', 'ECONNRESET', 'ENOTFOUND'];
  return retryableCodes.includes(error.code) || error.statusCode >= 500;
}

// In resolver
const Query = {
  posts: async (parent, args, context) => {
    return retryableOperation(() => fetchPosts(args.limit));
  }
};
```

This function implements exponential backoff for retrying failed operations. It only retries errors that are likely transient, like network timeouts or 5xx server errors.
+ +When adding retry logic: + +- Identify which operations are safe to retry +- Avoid retrying mutations that aren't idempotent unless you've added idempotency keys +- Set maximum retry counts to prevent hanging requests +- Use exponential backoff to avoid overwhelming failing services +- Log retry attempts to detect flaky dependencies + +## Additional error handling considerations + +Several other aspects support robust error handling in production: + +- **Error aggregation**: Group similar errors together to avoid alert fatigue and identify patterns in production issues +- **Error rate monitoring**: Track error rates over time to detect increases that indicate problems, even if individual errors seem normal +- **Client error reporting**: Implement client-side error tracking to capture errors that occur during client-side GraphQL operations +- **Graceful degradation**: Design your schema so clients can handle missing or null fields without breaking +- **Error documentation**: Document expected error codes and scenarios in your API documentation so clients can handle them appropriately diff --git a/website/pages/docs/production-schema-management.mdx b/website/pages/docs/production-schema-management.mdx new file mode 100644 index 0000000000..3f0eac5c08 --- /dev/null +++ b/website/pages/docs/production-schema-management.mdx @@ -0,0 +1,625 @@ +--- +title: Manage GraphQL Schemas +description: Version schemas safely, detect breaking changes, implement deprecation workflows, and establish governance processes for production GraphQL.js applications. +--- + +# Manage GraphQL Schemas + +GraphQL schemas change as products evolve. Adding features, fixing issues, and improving APIs require schema modifications. Managing these changes without breaking existing clients requires careful planning and consistent processes. 
+ +This guide shows you how to version schemas, detect breaking changes before they reach production, implement deprecation workflows, and establish governance processes for GraphQL.js applications. + +## Version schemas safely + +GraphQL's strong typing and introspection enable additive changes without breaking clients. Focus on adding new fields and types rather than modifying existing ones. + +### Add fields without breaking changes + +Extend your schema by adding optional fields and new types. + +```javascript +import { GraphQLObjectType, GraphQLString, GraphQLInt } from 'graphql'; + +const UserType = new GraphQLObjectType({ + name: 'User', + fields: { + id: { type: GraphQLString }, + name: { type: GraphQLString }, + email: { type: GraphQLString }, + avatarUrl: { type: GraphQLString }, + bio: { type: GraphQLString } + } +}); + +const QueryType = new GraphQLObjectType({ + name: 'Query', + fields: { + user: { + type: UserType, + args: { id: { type: GraphQLString } } + }, + userByEmail: { + type: UserType, + args: { email: { type: GraphQLString } } + } + } +}); +``` + +Adding fields to types and new queries to your schema doesn't affect existing clients. Clients only request fields they know about, so new fields remain invisible until clients explicitly query them. + +To add fields safely: + +- Make all new fields nullable unless your API can guarantee a value +- Add required fields only to new types +- Consider adding fields with default values when appropriate +- Document new fields so clients discover them through your API documentation rather than introspection alone + +### Handle breaking changes + +Identify changes that might break existing clients before deploying them. 
+ +Breaking changes include: + +- Removing fields or types +- Renaming fields or types +- Changing field types +- Adding required arguments +- Removing arguments +- Changing argument types from nullable to non-nullable + +```javascript +const UserType = new GraphQLObjectType({ + name: 'User', + fields: { + id: { type: GraphQLString }, + email: { type: GraphQLString } + } +}); + +const QueryType = new GraphQLObjectType({ + name: 'Query', + fields: { + users: { + type: new GraphQLList(UserType), + args: { + limit: { type: new GraphQLNonNull(GraphQLInt) } + } + } + } +}); +``` + +These changes break clients that query the removed username field or call the users query without the new required limit argument. + +When you must make breaking changes, use a deprecation workflow. Mark the old field as deprecated, add the new field or type alongside it, give clients time to migrate, then remove the deprecated field in a future release. Document breaking changes prominently and notify client teams before deploying. + +### Track schema versions + +Maintain a changelog of schema modifications to understand evolution over time. + +```javascript +/** + * Schema version: 2024-12-01 + * + * Changes: + * - added User.avatarUrl field + * - added User.bio field + * - added Query.userByEmail + * - deprecated User.username (use User.name instead) + * + * Breaking changes: none + */ + +export const schema = new GraphQLSchema({ + query: QueryType, + mutation: MutationType +}); +``` + +This comment documents what changed in this schema version, making it easier to understand evolution and communicate changes to client teams. 
To track schema versions effectively:

- Include the date or version number in your schema comments
- Document every change with categorization: addition, deprecation, breaking
- Keep a separate changelog file that spans all versions
- Use semantic versioning if you publish your schema as a package
- Consider including the schema version in your GraphQL response headers

## Detect breaking changes

Automated tools catch breaking changes before they affect production clients.

### Compare schema versions

Generate a diff between your current and proposed schema to identify changes.

```javascript
import { printSchema } from 'graphql';
import fs from 'fs';

export function detectSchemaChanges(currentSchema, proposedSchema) {
  const currentSDL = printSchema(currentSchema);

  const changes = [];

  // Persist a baseline on the first run so future diffs have a
  // reference schema checked into version control.
  if (!fs.existsSync('schema.baseline.graphql')) {
    fs.writeFileSync('schema.baseline.graphql', currentSDL);
    return changes;
  }

  const currentTypes = currentSchema.getTypeMap();
  const proposedTypes = proposedSchema.getTypeMap();

  for (const typeName in currentTypes) {
    if (typeName.startsWith('__')) continue;

    if (!proposedTypes[typeName]) {
      changes.push({
        type: 'BREAKING',
        message: `Type ${typeName} was removed`
      });
    }
  }

  return changes;
}
```

This function compares two schemas and identifies removed types. A complete implementation would also check fields, arguments, and type changes.
+ +To implement breaking change detection: + +- Integrate it into your CI/CD pipeline +- Fail builds that introduce breaking changes without explicit approval +- Store the baseline schema in version control +- Generate a detailed report of all changes, not just breaking ones +- Consider allowing breaking changes with an override flag for intentional releases + +### Use GraphQL Inspector + +Leverage existing tools for comprehensive schema validation. + +```javascript +import { diff } from '@graphql-inspector/core'; + +export async function validateSchemaChange(oldSchemaPath, newSchemaPath) { + const changes = await diff(oldSchemaPath, newSchemaPath); + + const breakingChanges = changes.filter(change => + change.criticality.level === 'BREAKING' + ); + + if (breakingChanges.length > 0) { + console.error('Breaking changes detected:'); + breakingChanges.forEach(change => { + console.error(`- ${change.message}`); + console.error(` Path: ${change.path}`); + }); + + process.exit(1); + } + + const safeChanges = changes.filter(change => + change.criticality.level !== 'BREAKING' + ); + + console.log(`Schema validation passed. ${safeChanges.length} safe changes detected.`); +} +``` + +GraphQL Inspector analyzes schemas and categorizes changes by severity. This example fails the validation if breaking changes exist. + +When using schema validation tools: + +- Run them in CI before merging changes +- Configure rules for which changes require manual review +- Generate reports that teams can review before deployment +- Integrate with pull request workflows to show schema changes inline +- Replace GraphQL Inspector with your preferred schema tooling while maintaining the same validation approach + +### Monitor field usage + +Track which fields clients actually query to understand the impact of removing fields. 
+ +```javascript +import { execute } from 'graphql'; + +const fieldUsage = new Map(); + +export function trackFieldUsage(schema, document, contextValue) { + return execute({ + schema, + document, + contextValue, + fieldResolver: (source, args, context, info) => { + const fieldPath = `${info.parentType.name}.${info.fieldName}`; + + fieldUsage.set(fieldPath, { + count: (fieldUsage.get(fieldPath)?.count || 0) + 1, + lastUsed: new Date().toISOString() + }); + + const resolver = info.parentType.getFields()[info.fieldName].resolve; + if (resolver) { + return resolver(source, args, context, info); + } + return source?.[info.fieldName]; + } + }); +} + +export function getUnusedFields(schema, daysUnused = 30) { + const cutoffDate = new Date(); + cutoffDate.setDate(cutoffDate.getDate() - daysUnused); + + const allFields = []; + const typeMap = schema.getTypeMap(); + + for (const typeName in typeMap) { + if (typeName.startsWith('__')) continue; + + const type = typeMap[typeName]; + if (type.getFields) { + const fields = type.getFields(); + for (const fieldName in fields) { + allFields.push(`${typeName}.${fieldName}`); + } + } + } + + return allFields.filter(field => { + const usage = fieldUsage.get(field); + if (!usage) return true; + return new Date(usage.lastUsed) < cutoffDate; + }); +} +``` + +This implementation tracks every field access during query execution and provides a list of fields that haven't been queried recently. + +To monitor field usage effectively: + +- Export usage data to your analytics system +- Set appropriate timeframes based on your client update cycles +- Consider seasonal usage patterns before marking fields as unused +- Combine usage data with deprecation decisions +- Use usage data to prioritize which deprecated fields to remove first + +## Implement deprecation workflows + +Communicate planned changes to clients and give them time to migrate before removing fields. 
+ +### Mark fields as deprecated + +Use GraphQL's built-in deprecation to signal fields will be removed. + +```javascript +import { GraphQLObjectType, GraphQLString } from 'graphql'; + +const UserType = new GraphQLObjectType({ + name: 'User', + fields: { + id: { type: GraphQLString }, + username: { + type: GraphQLString, + deprecationReason: 'Use `name` instead. Will be removed on 2025-03-01.' + }, + name: { type: GraphQLString }, + email: { type: GraphQLString } + } +}); +``` + +Deprecated fields appear in introspection with their deprecation reason, alerting client developers to migrate. + +When deprecating fields: + +- Provide clear migration instructions in the deprecation message +- Include a removal date to set expectations +- Offer a direct replacement field when possible +- Keep deprecated fields functional during the deprecation period +- Document deprecations in your changelog and API documentation beyond just the introspection + +### Communicate deprecations + +Notify client teams about deprecated fields they're using. 
+ +```javascript +import { execute } from 'graphql'; + +export async function executeWithDeprecationWarnings( + schema, + document, + contextValue +) { + const deprecatedFields = []; + + const result = await execute({ + schema, + document, + contextValue, + fieldResolver: (source, args, context, info) => { + const field = info.parentType.getFields()[info.fieldName]; + + if (field.deprecationReason) { + deprecatedFields.push({ + field: `${info.parentType.name}.${info.fieldName}`, + reason: field.deprecationReason, + path: info.path + }); + } + + if (field.resolve) { + return field.resolve(source, args, context, info); + } + return source?.[info.fieldName]; + } + }); + + if (deprecatedFields.length > 0) { + console.warn('Deprecated fields used:', { + operationName: contextValue.operationName, + userId: contextValue.userId, + fields: deprecatedFields + }); + } + + return result; +} +``` + +This execution wrapper detects when clients query deprecated fields and logs warnings with context about which clients need to migrate. + +To communicate deprecations effectively: + +- Track which clients use deprecated fields +- Send targeted notifications to teams using deprecated APIs +- Provide migration guides with code examples +- Set up alerts when deprecated field usage increases +- Include deprecation warnings in GraphQL response extensions during a grace period + +### Phase out deprecated fields + +Remove deprecated fields after sufficient notice and client migration time. + +```javascript +const UserTypePhase1 = new GraphQLObjectType({ + name: 'User', + fields: { + username: { type: GraphQLString }, + name: { type: GraphQLString } + } +}); + +const UserTypePhase2 = new GraphQLObjectType({ + name: 'User', + fields: { + username: { + type: GraphQLString, + deprecationReason: 'Use `name` instead. Will be removed on 2025-03-01.' 
+ }, + name: { type: GraphQLString } + } +}); + +const UserTypePhase3 = new GraphQLObjectType({ + name: 'User', + fields: { + name: { type: GraphQLString } + } +}); +``` + +This example shows three phases of field deprecation. Phase 1 adds the new name field alongside the existing username field on December 1, 2024. Phase 2 marks username as deprecated on January 1, 2025. Phase 3 removes the deprecated username field on March 1, 2025. This gives clients two months to discover the new field and another two months to migrate before the old field disappears. + +When removing deprecated fields: + +- Verify no clients are still using them through usage monitoring +- Provide a final warning before removal +- Document the removal in your changelog +- Consider a longer deprecation period for public APIs than internal ones +- Have a rollback plan if the removal causes unexpected issues + +## Establish governance processes + +Create consistent processes for reviewing and approving schema changes across teams. + +### Review schema changes + +Implement a review process before schema modifications reach production. 
```javascript
export const reviewChecklist = {
  breakingChanges: {
    description: 'Does this change break existing clients?',
    required: true,
    check: (schema, changes) => {
      return changes.filter(c => c.type === 'BREAKING').length === 0;
    }
  },

  documentation: {
    description: 'Are all new fields documented?',
    required: true,
    check: (schema, changes) => {
      const newFields = changes.filter(c => c.type === 'FIELD_ADDED');
      return newFields.every(field => field.description);
    }
  },

  naming: {
    description: 'Do field names follow naming conventions?',
    required: true,
    check: (schema, changes) => {
      const fieldChanges = changes.filter(c => c.type === 'FIELD_ADDED');
      return fieldChanges.every(field => {
        return /^[a-z][a-zA-Z0-9]*$/.test(field.name);
      });
    }
  },

  deprecationReason: {
    description: 'Do deprecated fields have clear reasons?',
    required: true,
    check: (schema, changes) => {
      const deprecated = changes.filter(c => c.type === 'FIELD_DEPRECATED');
      return deprecated.every(field =>
        field.deprecationReason &&
        field.deprecationReason.length > 10
      );
    }
  }
};

export function runSchemaReview(schema, changes) {
  const results = [];

  for (const [checkName, check] of Object.entries(reviewChecklist)) {
    const passed = check.check(schema, changes);

    results.push({
      check: checkName,
      description: check.description,
      passed,
      required: check.required
    });
  }

  const failedRequired = results.filter(r => r.required && !r.passed);

  if (failedRequired.length > 0) {
    console.error('Schema review failed:');
    failedRequired.forEach(r => {
      console.error(`- ${r.description}`);
    });
    process.exit(1);
  }

  console.log('Schema review passed');
  return results;
}
```

This checklist validates schema changes against governance rules before allowing deployment.
+ +To implement schema reviews: + +- Integrate them into your pull request process +- Require passing reviews before merging schema changes +- Customize the checklist for your organization's requirements +- Add checks for your specific naming conventions and patterns +- Consider different review levels for different types of changes + +### Document schema standards + +Create guidelines for consistent schema design across your organization. + +```markdown +# GraphQL schema standards + +## Naming conventions + +- Types: PascalCase (User, BlogPost) +- Fields: camelCase (firstName, createdAt) +- Arguments: camelCase (userId, pageSize) +- Enums: SCREAMING_SNAKE_CASE (ACTIVE, PENDING_APPROVAL) + +## Field guidelines + +- Use nullable fields by default +- Make fields non-null only when guaranteed +- Prefix boolean fields with "is" or "has" (isActive, hasAccess) +- Use consistent naming across types (createdAt, not creationDate) + +## Deprecation policy + +- Minimum 60-day deprecation period for public APIs +- Minimum 30-day deprecation period for internal APIs +- Include removal date in deprecation message +- Provide migration path in deprecation reason + +## Breaking changes + +- Require VP approval for any breaking change +- Document all breaking changes in changelog +- Notify affected teams 30 days before deployment +- Provide migration guides for all breaking changes +``` + +This document establishes consistent standards that all schema contributors follow. + +When creating schema standards: + +- Involve stakeholders from all teams using your GraphQL API +- Update standards as you learn what works for your organization +- Include examples of good and bad patterns +- Make standards easily accessible to all developers +- Review and update standards annually to reflect evolved practices + +### Control schema deployment + +Implement gates that prevent problematic schemas from reaching production. 
+

```javascript
import { validateSchema } from './schema-validator.js';
import { runSchemaReview } from './schema-review.js';
import { detectBreakingChanges } from './breaking-changes.js';
import { runSchemaTests } from './schema-tests.js';

// `changes` is the list of schema changes computed by your diff tooling;
// it defaults to an empty list so existing callers keep working.
export async function deploymentGate(proposedSchema, changes = []) {
  console.log('Running schema deployment validation...');

  // Reject schemas that are not valid GraphQL before any further checks.
  const validationErrors = validateSchema(proposedSchema);
  if (validationErrors.length > 0) {
    console.error('Schema validation failed:', validationErrors);
    return false;
  }

  // Breaking changes block deployment unless explicitly overridden.
  const breakingChanges = await detectBreakingChanges(proposedSchema);
  if (breakingChanges.length > 0 && !process.env.ALLOW_BREAKING_CHANGES) {
    console.error('Breaking changes detected:', breakingChanges);
    return false;
  }

  // Governance checklist: naming, documentation, deprecation reasons, etc.
  const reviewResults = runSchemaReview(proposedSchema, changes);
  const failed = reviewResults.filter(r => r.required && !r.passed);
  if (failed.length > 0) {
    console.error('Schema review checks failed:', failed);
    return false;
  }

  const testsPassed = await runSchemaTests();
  if (!testsPassed) {
    console.error('Schema tests failed');
    return false;
  }

  console.log('Schema deployment validation passed');
  return true;
}
```

This deployment gate runs multiple checks before allowing a schema to deploy. It validates that the schema is valid GraphQL, checks for breaking changes, runs governance checks, and verifies all tests pass. Any failure blocks the deployment, keeping governance violations out of production. 
+ +To implement deployment gates: + +- Integrate them into your CI/CD pipeline +- Make gates strict enough to catch problems but not so strict they block legitimate changes +- Provide clear error messages when gates fail +- Allow override mechanisms for emergency deployments with appropriate approval +- Log all gate decisions for audit trails + +## Additional schema management considerations + +Several other aspects support effective schema management in production: + +- **Schema federation**: Coordinate schema changes across multiple GraphQL services in a federated architecture +- **Schema stitching**: Manage schemas from multiple sources while maintaining consistency +- **Client-driven contracts**: Let client teams propose schema changes through a formal process +- **Schema testing**: Write tests that verify schema behavior and catch unintended changes +- **Documentation generation**: Automatically generate API documentation from your schema and keep it updated with changes From f8ee226b9bf32226bcc63949f35293fad4312573 Mon Sep 17 00:00:00 2001 From: sarahxsanders Date: Sun, 7 Dec 2025 14:20:10 -0500 Subject: [PATCH 2/2] cspell --- website/pages/docs/production-errors-debug.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/pages/docs/production-errors-debug.mdx b/website/pages/docs/production-errors-debug.mdx index 56bdd577d0..f4b1814fb2 100644 --- a/website/pages/docs/production-errors-debug.mdx +++ b/website/pages/docs/production-errors-debug.mdx @@ -503,7 +503,7 @@ To implement fallback strategies: Retry transient failures automatically before reporting errors to clients. 
```javascript -export async function retryableOperation(operation, maxRetries = 3) { +export async function retryOperation(operation, maxRetries = 3) { let lastError; for (let attempt = 0; attempt < maxRetries; attempt++) { @@ -512,7 +512,7 @@ export async function retryableOperation(operation, maxRetries = 3) { } catch (error) { lastError = error; - if (!isRetryable(error)) { + if (!isRetry(error)) { throw error; } @@ -530,15 +530,15 @@ export async function retryableOperation(operation, maxRetries = 3) { throw lastError; } -function isRetryable(error) { - const retryableCodes = ['ETIMEDOUT', 'ECONNRESET', 'ENOTFOUND']; - return retryableCodes.includes(error.code) || error.statusCode >= 500; +function isRetry(error) { + const retryCodes = ['ETIMEDOUT', 'ECONNRESET', 'ENOTFOUND']; + return retryCodes.includes(error.code) || error.statusCode >= 500; } // In resolver const Query = { posts: async (parent, args, context) => { - return retryableOperation(() => fetchPosts(args.limit)); + return retryOperation(() => fetchPosts(args.limit)); } }; ```