From 470c447d3517a4c1abc03d4b14acff58dade412f Mon Sep 17 00:00:00 2001
From: Aditya Mathur <57684218+MathurAditya724@users.noreply.github.com>
Date: Tue, 5 May 2026 21:28:34 +0000
Subject: [PATCH 01/14] feat: Replace Kapa AI with first-party docs-mcp chat
Remove the third-party Kapa AI widget and replace it with a custom
AI chat modal powered by the docs-mcp Cloudflare Worker.
Changes:
- Add AskAiProvider context for modal open/close state management
- Add AskAiModal chat component with streaming, multi-turn support,
markdown rendering, and example questions
- Rewire all Ask AI trigger points (search bar, header, inline search
result, ?askAI=true deep link) to use the new context
- Remove Kapa widget script, CSS, TypeScript types, and CSP entries
- Add docs-mcp.getsentry.workers.dev to CSP connect-src
- Add NEXT_PUBLIC_ASK_AI_API_URL env var
Co-Authored-By: Claude
---
.env.example | 3 +-
.opencode/plans/replace-kapa-with-docs-mcp.md | 1193 +++++++++++++++++
AGENTS.md | 30 +
app/globals.css | 4 -
app/layout.tsx | 29 +-
src/components/askAi/askAiContext.tsx | 46 +
src/components/askAi/askAiModal.tsx | 405 ++++++
src/components/askAi/index.tsx | 2 +
src/components/askAiSearchParams.tsx | 21 +-
src/components/header.tsx | 5 +-
src/components/search/index.tsx | 20 +-
src/globals.d.ts | 3 -
vercel.json | 2 +-
13 files changed, 1710 insertions(+), 53 deletions(-)
create mode 100644 .opencode/plans/replace-kapa-with-docs-mcp.md
create mode 100644 src/components/askAi/askAiContext.tsx
create mode 100644 src/components/askAi/askAiModal.tsx
create mode 100644 src/components/askAi/index.tsx
diff --git a/.env.example b/.env.example
index d377318329cede..55cd6975d95150 100644
--- a/.env.example
+++ b/.env.example
@@ -5,4 +5,5 @@
NEXT_PUBLIC_ALGOLIA_APP_ID=
NEXT_PUBLIC_ALGOLIA_SEARCH_KEY=
-NEXT_PUBLIC_SENTRY_DSN=
\ No newline at end of file
+NEXT_PUBLIC_SENTRY_DSN=
+NEXT_PUBLIC_ASK_AI_API_URL=https://docs-mcp.getsentry.workers.dev
\ No newline at end of file
diff --git a/.opencode/plans/replace-kapa-with-docs-mcp.md b/.opencode/plans/replace-kapa-with-docs-mcp.md
new file mode 100644
index 00000000000000..37d7a7f4375b26
--- /dev/null
+++ b/.opencode/plans/replace-kapa-with-docs-mcp.md
@@ -0,0 +1,1193 @@
+# Replace Kapa AI with docs-mcp
+
+## Overview
+
+Replace the third-party Kapa AI chatbot widget with a first-party AI chat powered by the `docs-mcp` Cloudflare Worker in `getsentry/cli-init-api`. This gives us full control over the AI experience, better answer quality (using docs.sentry.io's `.md` exports directly), and eliminates the dependency on Kapa's external service.
+
+## Architecture
+
+```
+Browser (sentry-docs) Cloudflare Worker (docs-mcp)
+┌─────────────────────┐ ┌──────────────────────────┐
+│ AskAI Modal │ POST /api/ask │ │
+│ (React component) │ ───SSE stream───> │ ask-agent │
+│ │ │ ├─ search_docs (doctree)│
+│ - multi-turn chat │ <──text/deltas── │ ├─ fetch_doc_page (.md) │
+│ - streaming display │ │ └─ Claude Sonnet answer │
+│ - markdown render │ │ │
+└─────────────────────┘ └──────────────────────────┘
+```
+
+**Why this is better than Kapa:**
+- Uses our own `.md` exports (structured, up-to-date)
+- Full control over the prompt, model, and answer quality
+- No third-party JS bundle (faster page load)
+- No external dependencies (Kapa's Google Cloud Run proxy)
+- Can cite specific doc pages with links
+- Can be tuned specifically for Sentry docs
+
+---
+
+## Part 1: docs-mcp Backend Changes
+
+All changes in `getsentry/cli-init-api`, specifically `apps/docs-mcp/`.
+
+### 1.1 Add AI SDK dependencies
+
+**File: `apps/docs-mcp/package.json`**
+
+Add to `dependencies`:
+```json
+"@ai-sdk/anthropic": "^1.2.0",
+"ai": "^5.0.0"
+```
+
+Run `bun install` from the monorepo root.
+
+### 1.2 Update Env type
+
+**File: `apps/docs-mcp/src/types.ts`**
+
+Add `ANTHROPIC_API_KEY` to the `Env` interface:
+
+```typescript
+export interface Env {
+ ANTHROPIC_API_KEY: string;
+ CF_VERSION_METADATA: string;
+}
+```
+
+Set this secret via `wrangler secret put ANTHROPIC_API_KEY`.
+
+### 1.3 Create doc fetcher for docs.sentry.io
+
+**New file: `apps/docs-mcp/src/docs/fetcher.ts`**
+
+This fetches pages from `docs.sentry.io` using `.md` exports and `doctree.json`. The docs-mcp already has `skill-fetcher.ts` for `skills.sentry.dev` — this is the equivalent for the docs site itself.
+
+```typescript
+import * as Sentry from "@sentry/cloudflare";
+
+const DOCS_BASE = "https://docs.sentry.io";
+const PAGE_CACHE_TTL = 3600; // 1 hour for individual pages
+const DOCTREE_CACHE_TTL = 21600; // 6 hours for doctree
+const FETCH_TIMEOUT_MS = 10_000;
+
+// ── Types ───────────────────────────────────────────────────────────
+
+export interface DocTreeNode {
+ title: string;
+ description?: string;
+ path: string;
+ children?: DocTreeNode[];
+}
+
+export interface DocSearchHit {
+ title: string;
+ description: string;
+ path: string;
+ url: string;
+}
+
+// ── Cached fetch helper ─────────────────────────────────────────────
+
+async function cachedFetch(url: string, ttl: number): Promise<string> {
+ return Sentry.startSpan(
+ { op: "http.client", name: `fetch ${url}` },
+ async (span) => {
+ let cache: Cache | undefined;
+ try {
+ cache = caches?.default;
+ } catch {
+ // Not in CF runtime
+ }
+
+ const request = new Request(url);
+
+ if (cache) {
+ const cached = await cache.match(request);
+ if (cached) {
+ span.setAttribute("cache.hit", true);
+ return cached.text();
+ }
+ }
+
+ span.setAttribute("cache.hit", false);
+ const response = await fetch(request, {
+ signal: AbortSignal.timeout(FETCH_TIMEOUT_MS),
+ });
+
+ if (!response.ok) {
+ throw new Error(`Failed to fetch ${url}: ${response.status}`);
+ }
+
+ const body = await response.text();
+
+ if (cache) {
+ const cacheResponse = new Response(body, {
+ headers: {
+ "Cache-Control": `public, max-age=${ttl}`,
+ "Content-Type": response.headers.get("Content-Type") ?? "text/plain",
+ },
+ });
+ await cache.put(request, cacheResponse);
+ }
+
+ return body;
+ }
+ );
+}
+
+// ── Doctree ─────────────────────────────────────────────────────────
+
+let doctreeCache: DocTreeNode[] | null = null;
+
+async function getDoctree(): Promise<DocTreeNode[]> {
+ if (doctreeCache) return doctreeCache;
+
+ const raw = await cachedFetch(`${DOCS_BASE}/doctree.json`, DOCTREE_CACHE_TTL);
+ doctreeCache = JSON.parse(raw) as DocTreeNode[];
+ return doctreeCache;
+}
+
+function flattenDoctree(
+ nodes: DocTreeNode[],
+ results: DocSearchHit[] = []
+): DocSearchHit[] {
+ for (const node of nodes) {
+ results.push({
+ title: node.title,
+ description: node.description ?? "",
+ path: node.path,
+ url: `${DOCS_BASE}${node.path}`,
+ });
+ if (node.children) {
+ flattenDoctree(node.children, results);
+ }
+ }
+ return results;
+}
+
+// ── Search ──────────────────────────────────────────────────────────
+
+export async function searchDocs(
+ query: string,
+ maxResults = 8
+): Promise<DocSearchHit[]> {
+ const tree = await getDoctree();
+ const allPages = flattenDoctree(tree);
+
+ const terms = query.toLowerCase().split(/\s+/).filter(Boolean);
+
+ const scored = allPages
+ .map((page) => {
+ const haystack =
+ `${page.title} ${page.description} ${page.path}`.toLowerCase();
+ let score = 0;
+ for (const term of terms) {
+ if (page.title.toLowerCase().includes(term)) score += 3;
+ if (page.path.toLowerCase().includes(term)) score += 2;
+ if (page.description.toLowerCase().includes(term)) score += 1;
+ }
+ return { page, score };
+ })
+ .filter((s) => s.score > 0)
+ .sort((a, b) => b.score - a.score)
+ .slice(0, maxResults);
+
+ return scored.map((s) => s.page);
+}
+
+// ── Fetch page ──────────────────────────────────────────────────────
+
+export async function fetchDocPage(path: string): Promise<string> {
+ const normalizedPath = path.replace(/\/$/, "");
+ const url = `${DOCS_BASE}${normalizedPath}.md`;
+ const content = await cachedFetch(url, PAGE_CACHE_TTL);
+ // Truncate very long pages to stay within token budget
+ return content.length > 25_000 ? content.slice(0, 25_000) + "\n\n[truncated]" : content;
+}
+```
+
+### 1.4 Create the ask agent
+
+**New file: `apps/docs-mcp/src/ask/agent.ts`**
+
+This is the core AI agent that answers questions about Sentry using docs.
+
+```typescript
+import { anthropic } from "@ai-sdk/anthropic";
+import * as Sentry from "@sentry/cloudflare";
+import { streamText, tool } from "ai";
+import { z } from "zod";
+import { fetchDocPage, searchDocs } from "../docs/fetcher";
+
+// ── Agent-internal tools ────────────────────────────────────────────
+
+const agentTools = {
+ search_docs: tool({
+ description:
+ "Search the Sentry documentation for pages relevant to a query. " +
+ "Returns titles, descriptions, and paths of matching pages.",
+ parameters: z.object({
+ query: z
+ .string()
+ .describe("Search query — use keywords related to the user's question"),
+ }),
+ execute: async ({ query }) => {
+ const hits = await searchDocs(query);
+ return hits.map((h) => ({
+ title: h.title,
+ description: h.description,
+ path: h.path,
+ url: h.url,
+ }));
+ },
+ }),
+
+ fetch_doc_page: tool({
+ description:
+ "Fetch the full markdown content of a specific Sentry documentation page. " +
+ "Use this after search_docs to read the details of a relevant page.",
+ parameters: z.object({
+ path: z
+ .string()
+ .describe("The path of the doc page, e.g. /platforms/javascript/guides/nextjs/"),
+ }),
+ execute: async ({ path }) => {
+ const content = await fetchDocPage(path);
+ return { path, content };
+ },
+ }),
+};
+
+// ── System prompt ───────────────────────────────────────────────────
+
+const SYSTEM_PROMPT = `You are the Sentry Docs AI assistant. You help users with questions about Sentry — an application monitoring and error tracking platform.
+
+Your knowledge comes from the official Sentry documentation at docs.sentry.io. You MUST use the provided tools to search and read documentation before answering questions. Do not rely on prior knowledge — always verify against the current docs.
+
+## Guidelines
+
+1. **Always search first.** Use search_docs to find relevant pages, then fetch_doc_page to read them before answering.
+2. **Be accurate.** Only provide information that is supported by the documentation you've read. If you can't find an answer, say so.
+3. **Cite sources.** Include links to relevant documentation pages in your answers using markdown links like [Page Title](https://docs.sentry.io/path/).
+4. **Be concise.** Give direct, actionable answers. Include code snippets when they help.
+5. **Use markdown.** Format your responses with proper markdown — headings, code blocks with language tags, lists, etc.
+6. **Stay on topic.** Only answer questions about Sentry. For unrelated questions, politely redirect.
+7. **Handle platform specifics.** If the user mentions a specific platform/framework (Next.js, Django, etc.), search for platform-specific docs.
+
+## What you can help with
+- SDK setup and configuration
+- Error monitoring, tracing, session replay, logs, profiling, crons
+- Product features (alerts, dashboards, releases, etc.)
+- Integrations (GitHub, Slack, etc.)
+- Troubleshooting SDK issues
+- Migration between SDK versions
+- Self-hosted Sentry setup
+
+## Privacy
+Do not ask for or accept any sensitive information like auth tokens, DSNs with secrets, or personal data. If a user shares such information, remind them not to.`;
+
+// ── Message types ───────────────────────────────────────────────────
+
+export interface ChatMessage {
+ role: "user" | "assistant";
+ content: string;
+}
+
+// ── Stream response ─────────────────────────────────────────────────
+
+export function streamAskResponse(
+ messages: ChatMessage[],
+ apiKey: string
+) {
+ return Sentry.startSpan(
+ {
+ op: "ai.ask",
+ name: "streamAskResponse",
+ attributes: { "ai.message_count": messages.length },
+ },
+ () => {
+ return streamText({
+ model: anthropic.chat("claude-sonnet-4-20250514", {
+ apiKey,
+ }),
+ system: SYSTEM_PROMPT,
+ messages,
+ tools: agentTools,
+ maxSteps: 5,
+ temperature: 0,
+ onError: (error) => {
+ Sentry.captureException(error);
+ },
+ });
+ }
+ );
+}
+```
+
+### 1.5 Add the `/api/ask` endpoint
+
+**File: `apps/docs-mcp/src/index.ts`** — add the new route:
+
+```typescript
+import { StreamableHTTPTransport } from "@hono/mcp";
+import { Hono } from "hono";
+import { cors } from "hono/cors";
+import { createMcpServer } from "./mcp";
+import * as Sentry from "@sentry/cloudflare";
+import type { Env } from "./types";
+import { streamAskResponse, type ChatMessage } from "./ask/agent";
+
+const app = new Hono<{ Bindings: Env }>().use(cors());
+
+// ── MCP endpoint (existing) ─────────────────────────────────────────
+
+app.all("/mcp", async (c) => {
+ const transport = new StreamableHTTPTransport();
+ const mcp = createMcpServer();
+ await mcp.connect(transport);
+ return transport.handleRequest(c);
+});
+
+// ── Ask AI endpoint (new) ───────────────────────────────────────────
+
+app.post("/api/ask", async (c) => {
+ const body = await c.req.json<{ messages: ChatMessage[] }>();
+
+ if (!body.messages || !Array.isArray(body.messages) || body.messages.length === 0) {
+ return c.json({ error: "messages array is required" }, 400);
+ }
+
+ // Limit conversation length to prevent abuse
+ if (body.messages.length > 20) {
+ return c.json({ error: "conversation too long (max 20 messages)" }, 400);
+ }
+
+ const apiKey = c.env.ANTHROPIC_API_KEY;
+ if (!apiKey) {
+ return c.json({ error: "AI service not configured" }, 503);
+ }
+
+ const result = streamAskResponse(body.messages, apiKey);
+
+ // Return as a streaming response using Vercel AI SDK's data stream
+ return result.toDataStreamResponse({
+ headers: {
+ "Access-Control-Allow-Origin": "*",
+ "Cache-Control": "no-cache",
+ },
+ });
+});
+
+// ── Export ───────────────────────────────────────────────────────────
+
+export default Sentry.withSentry(
+ (env: Env) => ({
+ dsn: "https://05c618aef6e0489b8c9d07e0d65664ce@o1.ingest.us.sentry.io/4511189954199552",
+ sendDefaultPii: true,
+ tracesSampleRate: 1.0,
+ }),
+ app
+);
+```
+
+### 1.6 Add metrics for the ask endpoint
+
+**File: `apps/docs-mcp/src/metrics.ts`** — add:
+
+```typescript
+/** Track an /api/ask call. */
+export function trackAskCall(success: boolean, messageCount: number): void {
+ Sentry.metrics.count("docs_mcp.ask.call", 1, {
+ attributes: { success, message_count: messageCount },
+ });
+}
+
+/** Track /api/ask response duration. */
+export function trackAskDuration(durationMs: number): void {
+ Sentry.metrics.distribution("docs_mcp.ask.duration", durationMs, {
+ unit: "millisecond",
+ });
+}
+```
+
+### 1.7 Summary of new files in docs-mcp
+
+```
+apps/docs-mcp/src/
+├── ask/
+│ └── agent.ts # NEW: Ask agent with streaming + doc tools
+├── docs/
+│ └── fetcher.ts # NEW: docs.sentry.io page/doctree fetcher
+├── index.ts # MODIFIED: add /api/ask route
+├── mcp.ts # UNCHANGED
+├── metrics.ts # MODIFIED: add ask metrics
+├── skills/ # UNCHANGED
+│ ├── registry.ts
+│ ├── skill-fetcher.ts
+│ └── skill-parser.ts
+└── types.ts # MODIFIED: add ANTHROPIC_API_KEY to Env
+```
+
+---
+
+## Part 2: sentry-docs Frontend Changes
+
+All changes in `getsentry/sentry-docs`.
+
+### 2.1 Add environment variable
+
+**File: `.env.example`** — add:
+```
+NEXT_PUBLIC_ASK_AI_API_URL=https://docs-mcp.getsentry.workers.dev
+```
+
+### 2.2 Create AskAI context provider
+
+**New file: `src/components/askAi/askAiContext.tsx`**
+
+Provides modal open/close state and the ability to open with a pre-filled query.
+
+```tsx
+'use client';
+
+import {createContext, useCallback, useContext, useMemo, useState} from 'react';
+
+interface AskAiContextValue {
+ isOpen: boolean;
+ initialQuery: string | null;
+ autoSubmit: boolean;
+ open: (opts?: {query?: string; submit?: boolean}) => void;
+ close: () => void;
+}
+
+const AskAiContext = createContext<AskAiContextValue | null>(null);
+
+export function AskAiProvider({children}: {children: React.ReactNode}) {
+ const [isOpen, setIsOpen] = useState(false);
+  const [initialQuery, setInitialQuery] = useState<string | null>(null);
+ const [autoSubmit, setAutoSubmit] = useState(false);
+
+ const open = useCallback((opts?: {query?: string; submit?: boolean}) => {
+ setInitialQuery(opts?.query ?? null);
+ setAutoSubmit(opts?.submit ?? false);
+ setIsOpen(true);
+ }, []);
+
+ const close = useCallback(() => {
+ setIsOpen(false);
+ setInitialQuery(null);
+ setAutoSubmit(false);
+ }, []);
+
+ const value = useMemo(
+ () => ({isOpen, initialQuery, autoSubmit, open, close}),
+ [isOpen, initialQuery, autoSubmit, open, close]
+ );
+
+  return <AskAiContext.Provider value={value}>{children}</AskAiContext.Provider>;
+}
+
+export function useAskAi() {
+ const ctx = useContext(AskAiContext);
+ if (!ctx) {
+ throw new Error('useAskAi must be used within AskAiProvider');
+ }
+ return ctx;
+}
+```
+
+### 2.3 Create the AskAI modal chat component
+
+**New file: `src/components/askAi/askAiModal.tsx`**
+
+This is the main chat modal component with streaming markdown support.
+
+```tsx
+'use client';
+
+import {Cross1Icon} from '@radix-ui/react-icons';
+import {useCallback, useEffect, useRef, useState} from 'react';
+
+import {MagicIcon} from '../cutomIcons/magic';
+import {useAskAi} from './askAiContext';
+
+const ASK_AI_API_URL =
+ process.env.NEXT_PUBLIC_ASK_AI_API_URL ?? 'https://docs-mcp.getsentry.workers.dev';
+
+interface Message {
+ role: 'user' | 'assistant';
+ content: string;
+}
+
+export function AskAiModal() {
+ const {isOpen, initialQuery, autoSubmit, close} = useAskAi();
+  const [messages, setMessages] = useState<Message[]>([]);
+ const [input, setInput] = useState('');
+ const [isStreaming, setIsStreaming] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+  const messagesEndRef = useRef<HTMLDivElement | null>(null);
+  const inputRef = useRef<HTMLTextAreaElement | null>(null);
+ const hasAutoSubmitted = useRef(false);
+
+ // Handle initial query from context
+ useEffect(() => {
+ if (isOpen && initialQuery && !hasAutoSubmitted.current) {
+ setInput(initialQuery);
+ if (autoSubmit) {
+ hasAutoSubmitted.current = true;
+ // Submit after a tick to let the input render
+ setTimeout(() => {
+ submitMessage(initialQuery);
+ }, 0);
+ }
+ }
+ if (!isOpen) {
+ hasAutoSubmitted.current = false;
+ }
+ }, [isOpen, initialQuery, autoSubmit]);
+
+ // Focus input when modal opens
+ useEffect(() => {
+ if (isOpen && !autoSubmit) {
+ setTimeout(() => inputRef.current?.focus(), 100);
+ }
+ }, [isOpen, autoSubmit]);
+
+ // Scroll to bottom on new messages
+ useEffect(() => {
+ messagesEndRef.current?.scrollIntoView({behavior: 'smooth'});
+ }, [messages]);
+
+ // Close on Escape
+ useEffect(() => {
+ const handleKeyDown = (e: KeyboardEvent) => {
+ if (e.key === 'Escape' && isOpen) {
+ close();
+ }
+ };
+ window.addEventListener('keydown', handleKeyDown);
+ return () => window.removeEventListener('keydown', handleKeyDown);
+ }, [isOpen, close]);
+
+ const submitMessage = useCallback(
+ async (messageText?: string) => {
+ const text = messageText ?? input.trim();
+ if (!text || isStreaming) return;
+
+ setError(null);
+ const userMessage: Message = {role: 'user', content: text};
+ const newMessages = [...messages, userMessage];
+ setMessages(newMessages);
+ setInput('');
+ setIsStreaming(true);
+
+ try {
+ const response = await fetch(`${ASK_AI_API_URL}/api/ask`, {
+ method: 'POST',
+ headers: {'Content-Type': 'application/json'},
+ body: JSON.stringify({messages: newMessages}),
+ });
+
+ if (!response.ok) {
+ const errBody = await response.text();
+ throw new Error(errBody || `HTTP ${response.status}`);
+ }
+
+ // Parse Vercel AI SDK data stream
+ const reader = response.body?.getReader();
+ if (!reader) throw new Error('No response body');
+
+ const decoder = new TextDecoder();
+ let assistantContent = '';
+
+ // Add empty assistant message that we'll stream into
+ setMessages(prev => [...prev, {role: 'assistant', content: ''}]);
+
+ while (true) {
+ const {done, value} = await reader.read();
+ if (done) break;
+
+ const chunk = decoder.decode(value, {stream: true});
+ // Vercel AI SDK data stream format: lines starting with specific prefixes
+ // 0: is text delta, e: is error, d: is done
+ const lines = chunk.split('\n');
+ for (const line of lines) {
+ if (line.startsWith('0:')) {
+ // Text delta — parse the JSON string after "0:"
+ try {
+ const text = JSON.parse(line.slice(2));
+ assistantContent += text;
+ setMessages(prev => {
+ const updated = [...prev];
+ updated[updated.length - 1] = {
+ role: 'assistant',
+ content: assistantContent,
+ };
+ return updated;
+ });
+ } catch {
+ // skip malformed chunks
+ }
+ }
+ }
+ }
+ } catch (err) {
+ setError(err instanceof Error ? err.message : 'Failed to get response');
+ // Remove the empty assistant message if there was an error
+ setMessages(prev => {
+ if (prev.length > 0 && prev[prev.length - 1].content === '') {
+ return prev.slice(0, -1);
+ }
+ return prev;
+ });
+ } finally {
+ setIsStreaming(false);
+ }
+ },
+ [input, messages, isStreaming]
+ );
+
+ const handleSubmit = useCallback(
+ (e: React.FormEvent) => {
+ e.preventDefault();
+ submitMessage();
+ },
+ [submitMessage]
+ );
+
+ const handleKeyDown = useCallback(
+ (e: React.KeyboardEvent) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ submitMessage();
+ }
+ },
+ [submitMessage]
+ );
+
+ const handleNewConversation = useCallback(() => {
+ setMessages([]);
+ setInput('');
+ setError(null);
+ inputRef.current?.focus();
+ }, []);
+
+ if (!isOpen) return null;
+
+ return (
+
+ {/* Backdrop */}
+
+
+ {/* Modal */}
+
+ {/* Header */}
+
+
+
+
+ Ask AI
+
+
+
+ {messages.length > 0 && (
+
+ )}
+
+
+
+
+ {/* Messages */}
+
+ {messages.length === 0 && (
+
+
+
+ Ask me anything about Sentry
+
+
+ {[
+ 'How do I set up Sentry for Next.js?',
+ 'What are tracePropagationTargets?',
+ 'How do I set up distributed tracing?',
+ ].map(q => (
+
+ ))}
+