diff --git a/.optimize-cache.json b/.optimize-cache.json index 60b0fb92f0..54afcdefdc 100644 --- a/.optimize-cache.json +++ b/.optimize-cache.json @@ -98,7 +98,9 @@ "images/blog/5-vs-code-extensions-that-replace-entire-dev-tools/thunder-client.png": "4017e48f8c3641d140d26e7f7039c75367345b423f15ecf292cf0c99b25ca43a", "images/blog/5-vs-code-extensions-that-replace-entire-dev-tools/time-master.png": "0088a67ba8de6544ba27961efa93b6ac3b2921f9edde18612133fc9339d73b76", "images/blog/6-practical-ways-developers-use-ai-to-build-faster/cover.png": "df0d7e4c191e7209519f58ecc94cd4a9682d4ef2e99b74e5aedab7b9841206e4", + "images/blog/7-prompting-mistakes-you-need-to-stop-making-right-now/cover.png": "eea096934c726917cfa1686a6c23198bf70c4215191101302047aef440404dde", "images/blog/7-steps-gdpr-startups/cover.png": "9894264a71940716de2ec5e09711834791ddd1c510dee9e5bf42a864343c5a2d", + "images/blog/7-things-claude-can-do-that-will-blow-your-mind/cover.png": "7bb98ebda77a4cc582b67e5d72db18ee32b94278d17a805ee9a94f1f459db741", "images/blog/a-recap-of-init/init1.png": "446305a616f6ce3ec77b01e5f5ab5dbf0e68f32268a3d5aab7249fc055ff61cd", "images/blog/a-recap-of-init/init10.png": "8f39e8d643d0630ced6f5c96a8bc9dbac72a7759e2e7caf7a09699f03566b184", "images/blog/a-recap-of-init/init11.png": "77ee790eecb99b592884a2c55933011587b965a95bccafd9fbd6cbeec6f81416", @@ -1062,6 +1064,7 @@ "images/blog/the-journey-and-meaning-behind-our-new-logo/old_logo.png": "0690abbd5e2720df68534729973ff0155c383900d64bcd297e1c67e517c785ce", "images/blog/the-shift-from-SaaS-to-Vertical-AI-what-startup-founders-need-to-know/cover-image.png": "e4030cb8b735baa8f4f6eec9d0d32233011759b160882015738cda2e79da14d6", "images/blog/the-subtle-art-of-hackathon ideation/cover.png": "a4007fb895ed8cb284e2409897282a784b803c199b91d58e90a2dd69f367ba33", + "images/blog/the-top-3-claude-features-you-are-probably-not-using/cover.png": "429930ed746a8d47a42358c99c1eba6ee0d33531d4270ac4f24594438f65f503", 
"images/blog/the-underrated-value-of-great-sdk-design/cover.png": "9440e4f5a69b01d20796926ef3543dbba61fd1ba66e114713cf4ee503bc1d4b9", "images/blog/threads-cover.png": "fa44d6cd70000ac7a62d3b9446b171f8e9fe1b27f157cb6ba2f98c1f8c043526", "images/blog/three-important-steps-you-need-to-complete-with-appwrite/3-important-steps.png": "5a3ad677a3aff5f27f0ceb8b751ba830fb6e11ca5edca126df34777f378b15a4", @@ -1075,6 +1078,7 @@ "images/blog/top-25-vibe-coding-tools/cover.png": "ce7108cb2918ddcad9e04955172c301f02593d4714a37eac5a31c2f1167691df", "images/blog/top-5-tips-to-build-an-AI-agent-startup/cover-image.png": "edaeb08b23398fd35e12b176a5b487e64804bcf884c5cf18ff49568050af650e", "images/blog/top-6-vector-databases-2025/cover.png": "89150f1319bf3c66182c34304ee2b9847eddbfd9768ebe55d93d34979fded034", + "images/blog/top-7-prompts-every-developer-should-use-to-get-better-results/cover.png": "2dd4eb55c0c2d13b6d8a25ba63606e3047f5b50d36b450e0a8cf0e5dc6e5ed86", "images/blog/top-australia-incubators-accelerators/cover.png": "42ad622b49db1044a7106aad2bbc963ceb003b3d5818b40dd738d1fc915d8988", "images/blog/top-auth0-alternatives/Appwrite.png": "cc920e9920561fd7b452cb46838eec63c1cab47bdbb4f4de17b23a891fcbee69", "images/blog/top-auth0-alternatives/Cognito.png": "c10f2bd1786b47af57f03ce442b50603a15dff2d33d03ffa6fbfa51c9ca95d20", diff --git a/src/routes/blog/post/7-prompting-mistakes-you-need-to-stop-making-right-now/+page.markdoc b/src/routes/blog/post/7-prompting-mistakes-you-need-to-stop-making-right-now/+page.markdoc new file mode 100644 index 0000000000..8effd79ae7 --- /dev/null +++ b/src/routes/blog/post/7-prompting-mistakes-you-need-to-stop-making-right-now/+page.markdoc @@ -0,0 +1,102 @@ +--- +layout: post +title: 7 prompting mistakes you need to stop making right now +description: Avoid these 7 common prompting mistakes that lead to poor AI outputs. Learn how to write better prompts and get more accurate and reliable results. 
+date: 2026-04-27 +cover: /images/blog/7-prompting-mistakes-you-need-to-stop-making-right-now/cover.png +timeToRead: 5 +author: aishwari +category: ai +featured: false +unlisted: true +--- + +You write a prompt. The AI gives you something half-baked. You try again, tweak the wording, and still get something that misses the point. Sound familiar? + +The problem usually isn't the model. It's the prompt. + +Most developers learned to prompt by trial and error, which means they picked up bad habits along the way. This post covers the seven most common prompting mistakes, why they cause problems, and what to do instead. + +# 7 common prompting mistakes and how to fix them + +## Mistake 1: Writing vague, context-free prompts + +The most common mistake is treating the AI like it already knows what you're working on. Prompts like "fix this bug" or "write a function for this" give the model almost nothing to work with. It fills in the gaps with assumptions, and those assumptions are usually wrong. + +The model doesn't know your stack, your naming conventions, your constraints, or what "done" looks like. You have to tell it. + +**What to do instead:** Include the language, framework, what the code is supposed to do, and what went wrong. A prompt like "This TypeScript function throws a type error when the user is null. Here's the code and the error message. Fix it without changing the return type" gives the model actual direction. + +## Mistake 2: Asking for everything in one prompt + +Trying to solve too much in a single prompt is one of the fastest ways to get a generic, low-quality response. If you ask the AI to plan an architecture, write the implementation, add error handling, and create tests all at once, the output will be shallow across all of it. + +Models work better when they can focus. A single, well-scoped prompt almost always beats one massive ask. + +**What to do instead:** Break the work into steps. Plan first, then implement, then refine. 
Treat it like a series of focused requests, not a one-shot command. This is especially true for complex tasks involving multiple files or systems. + +## Mistake 3: Not specifying the output format + +If you don't say what format you want, you'll get whatever the model decides to give you. Sometimes that's a wall of prose when you wanted a list. Sometimes it's bare code when you needed an explanation alongside it. Every extra formatting step you do manually is time wasted. + +**What to do instead:** Be explicit. Say "return only the function, no explanation" or "respond with a numbered list" or "format this as a JSON object with these fields." A single line about format saves multiple back-and-forth iterations. + +## Mistake 4: Giving no examples + +Describing what you want in words is harder than showing it. If you want the model to match a particular style, structure, or output pattern, the fastest way to get there is to show an example. + +This is especially true for code style, data formats, and tone in generated content. + +**What to do instead:** Include a short example of the output you're expecting. Even one example dramatically narrows the solution space and improves accuracy. If you want the AI to generate a database query in a specific style, paste in one query that looks right and say "write more like this." + +## Mistake 5: Treating the first response as final + +Most developers accept the first output, copy it, and move on. This is leaving quality on the table. AI outputs are a starting point, not a finished product. The first response sets a baseline. The real value comes from iteration. + +**What to do instead:** Follow up. If the response is 80% right, tell it what's off and ask it to revise. If it's heading in the right direction, ask it to go deeper on a specific section. Prompting is a conversation, not a command-and-receive loop. 
A second or third turn with specific feedback almost always produces better results than rewriting the original prompt from scratch. + +## Mistake 6: Ignoring the system prompt or context window + +If you're building AI-powered features or using the API, skipping the system prompt is a significant mistake. The system prompt is where you define the model's behavior, constraints, and role. Without it, the model has no guardrails and will make a lot of implicit decisions about how to behave. + +Similarly, dumping too much irrelevant context into the prompt dilutes the signal. The model pays attention to all of it, which means your actual question can get buried. + +**What to do instead:** Use the system prompt to define scope and behavior. Keep the context relevant. If you're passing in code, include only the parts that matter to the task. If you're working with an AI assistant, give it a clear role and constraints upfront. For developers building on the [Claude API](https://docs.anthropic.com/en/docs/get-started), the system prompt is the most important lever you have for consistent behavior. + +## Mistake 7: Not iterating on prompts that consistently fail + +If you've run the same type of prompt ten times and gotten mediocre results ten times, the prompt is broken. A lot of developers keep re-running broken prompts hoping the output will improve. It won't. The model isn't getting better at your specific task between runs. + +**What to do instead:** Treat failing prompts like failing tests. Debug them. Figure out which part of the prompt is causing the issue: is it missing context, a poorly specified constraint, or an ambiguous instruction? Change one variable at a time. Once you find a prompt pattern that works well, save it. Reusable prompt templates are one of the highest-leverage investments you can make if you use AI tools regularly. 
+ +# What good prompting actually looks like + +Here's a concrete before-and-after example: + +**Before:** +``` +Write me a login function +``` + +**After:** +``` +Write a login function in Node.js using Express and bcrypt. +The function should accept email and password, query a PostgreSQL +database using the pg library, compare the hashed password, and +return a signed JWT on success or a 401 error on failure. +Do not use any ORM. Return only the function, no boilerplate. +``` + +The second prompt gives the model a language, a framework, specific libraries, expected behavior, an error case, and an output format constraint. Every one of those details narrows the output space and brings the result closer to what you actually need. + +# Prompting well is a skill, not a shortcut + +The developers getting the most out of AI tools aren't the ones who use it the most. They're the ones who've learned to communicate precisely. Prompting is a communication skill. The better you get at it, the more leverage you get from every AI tool in your workflow. + +If you're building applications with AI features and want a backend that keeps up, [Appwrite](https://appwrite.io) gives you auth, databases, storage, and serverless functions in one platform. Appwrite also works with the [Model Context Protocol](/docs/tooling/mcp), so your AI assistant can interact directly with your backend, creating collections, querying data, and managing functions through natural language. It's a practical way to pair better prompting with better tooling. + +# More resources +- [Appwrite MCP server](/docs/tooling/mcp) +- [Build AI-powered applications with Appwrite](/docs/products/ai) +- [How to vibe code? 
8 real-world workflow tips for faster builds](/blog/post/best-vibe-coding-tips) +- [Agentic AI vs Generative AI: A complete overview](/blog/post/agentic-ai-vs-generative-ai) diff --git a/src/routes/blog/post/7-things-claude-can-do-that-will-blow-your-mind/+page.markdoc b/src/routes/blog/post/7-things-claude-can-do-that-will-blow-your-mind/+page.markdoc new file mode 100644 index 0000000000..a5b43478cb --- /dev/null +++ b/src/routes/blog/post/7-things-claude-can-do-that-will-blow-your-mind/+page.markdoc @@ -0,0 +1,84 @@ +--- +layout: post +title: 7 things Claude can do that will blow your mind +description: Discover 7 things Claude can do that most people don't use. From better prompts to smarter workflows, learn how to get more out of your AI. +date: 2026-04-27 +cover: /images/blog/7-things-claude-can-do-that-will-blow-your-mind/cover.png +timeToRead: 5 +author: aishwari +category: ai +featured: false +unlisted: true +--- + +Most people use Claude like a glorified search engine. Ask a question, skim the answer, move on. That's the surface. Underneath it, there's a set of capabilities most developers haven't touched yet, and some of them will genuinely change how you work. + +Here are 7 things Claude can do that most people don't know about or use. + +# 1. Think through hard problems before answering + +Claude's **extended thinking** mode lets it pause and reason step by step before giving you an answer. Instead of a quick response based on pattern matching, it works through the problem the same way a developer would: breaking it apart, checking assumptions, catching edge cases, and then responding. + +This is particularly useful for debugging logic errors, designing system architecture, or working through tricky algorithm problems. The thinking is visible in the response, so you can follow the reasoning, not just accept the conclusion. 
+ +If you've been getting shallow answers to complex questions, you're probably not giving Claude the signal that it should slow down and think. Being explicit helps: "Think through this carefully before answering" or framing the problem as a multi-step challenge. + +# 2. Read your entire codebase at once + +Claude supports a **200,000-token context window**. That's roughly 150,000 words, or the equivalent of several large source files at once. In practice, it means you can paste in an entire backend, a full migration history, or a set of interdependent modules and ask Claude to reason across all of it. + +This isn't useful just for question-answering. Where it really shines is understanding unfamiliar codebases. Drop in a service you've inherited, describe what you're trying to change, and let Claude trace the dependencies and surface the parts that actually matter. + +Most AI tools force you to chunk your code and lose context between requests. Claude holds it all together. + +# 3. Analyze screenshots and turn them into code + +Claude understands images. Hand it a screenshot of a UI, a mockup, a diagram, or a database schema drawn on a whiteboard and it can describe what it sees, identify patterns, and generate working code to match. + +This is particularly useful for **design-to-code workflows**. Share a Figma screenshot or a screen recording of a broken UI, and Claude can produce a layout that matches it, point to where the CSS is likely off, or suggest what the component structure should look like. + +Vision isn't a novelty here. It removes a translation step that used to require a designer, a developer, and a dozen back-and-forths. + +# 4. Write, run, and fix code autonomously with Claude Code + +**[Claude Code](https://claude.ai/code)** is an agentic coding tool that takes instructions in plain language and handles the implementation end to end. It reads your files, writes the code, runs tests, catches what failed, and iterates until it works. 
+ +This goes well beyond autocomplete or suggestion-based tools. Claude Code can scaffold an entire feature, refactor a module, or investigate a bug across multiple files without you directing every step. You describe the outcome; it figures out the path. + +It's worth trying for tasks that would normally take you an afternoon of focused work. Set up auth, wire up an API integration, migrate a schema. Give it the goal and let it work. + +# 5. Connect to your tools in real time via MCP + +The **Model Context Protocol (MCP)** gives Claude the ability to connect to external systems, your database, your APIs, your services, and actually interact with them. Not just describe what it would do, but do it. + +With the [Appwrite MCP server](/docs/tooling/mcp), for example, Claude can create users, query collections, manage storage, and trigger functions directly inside your Appwrite project. You describe what you need in plain language, and Claude handles the operation. No copy-pasting API responses. No switching tabs to check the console. + +This is what [agentic AI](/blog/post/agentic-ai-vs-generative-ai) actually looks like in practice: the model connected to real systems, taking real actions. If you've only ever used Claude in a chat window, adding MCP changes the category of work it can handle. + +# 6. Remember context across conversations with Projects + +Claude has a **Projects** feature that lets it retain context across sessions. You can give it background about your codebase, your team's conventions, the constraints you're working under, and it carries that forward into every conversation inside the project. + +This removes the repetitive setup most developers hate. You don't need to re-explain your stack every time you open a new chat. You don't need to re-paste your schema to ask a follow-up question. Claude already knows what you've told it. 
+ +It works especially well for ongoing work: a product you're actively building, a codebase you maintain, or any context that would take several paragraphs to re-establish from scratch. + +# 7. Act as a second engineer on technical decisions + +Most developers use Claude to fix code. Fewer use it to think through decisions. That's where it's arguably most valuable. + +Feed Claude a technical decision you're weighing, the trade-offs you've identified, the constraints you're working under, and ask it for a clear opinion. It will give you one. It won't hedge endlessly or tell you "it depends" without explaining what it depends on. It can draft the architecture, point out what you haven't considered, and tell you which approach it would take and why. + +This works best when you treat Claude like a collaborator, not a tool. Share the real context. Ask for the honest read. Push back when something doesn't land right. The output quality goes up fast when the conversation is two-directional. + +# Start using Claude with your backend + +Most of these capabilities are available out of the box, but the one that changes the day-to-day the most for developers is the MCP connection to a real backend. Once Claude can actually read and write to your project, it stops being an assistant and starts being an active part of your workflow. + +If you're using Appwrite, the [Appwrite MCP server](/docs/tooling/mcp) is the fastest way to get there. It connects Claude directly to your project so you can manage data, run operations, and prototype features without leaving your editor. The [Appwrite AI integrations](/integrations#ai) page has more on how to wire up AI tools to your stack. 
+ +# More resources + +- [Appwrite MCP server documentation](/docs/tooling/mcp) +- [Agentic AI vs Generative AI: what's actually different](/blog/post/agentic-ai-vs-generative-ai) +- [10 best MCP servers for developers](/blog/post/10-best-mcp-server-client) diff --git a/src/routes/blog/post/the-top-3-claude-features-you-are-probably-not-using/+page.markdoc b/src/routes/blog/post/the-top-3-claude-features-you-are-probably-not-using/+page.markdoc new file mode 100644 index 0000000000..c51bae4deb --- /dev/null +++ b/src/routes/blog/post/the-top-3-claude-features-you-are-probably-not-using/+page.markdoc @@ -0,0 +1,124 @@ +--- +layout: post +title: The top 3 Claude features you are probably not using +description: Discover the top 3 Claude features most developers overlook. Learn how to use them to improve prompts, streamline workflows, and get better results from Claude. +date: 2026-04-27 +cover: /images/blog/the-top-3-claude-features-you-are-probably-not-using/cover.png +timeToRead: 5 +author: aishwari +category: ai +featured: false +unlisted: true +--- + +Most developers use Claude like a faster search engine. You paste a question, get an answer, open a new chat, repeat. That loop works, but it skips the features that make Claude genuinely useful for serious development work. + +These three features are all available today. They're just not obvious. + +# Extended thinking: let Claude reason before it responds + +The default Claude behavior is to respond immediately. For most questions, that's fine. For complex ones, it's a liability. + +**Extended thinking** is a mode where Claude works through a problem step by step before it generates a response. You see the reasoning chain, not just the conclusion. This matters when you're debugging something subtle, evaluating architectural trade-offs, or reviewing logic that has multiple layers of indirection. 
+ +When using the Claude API, you enable it by adding a `thinking` block to your request: + +```json +{ + "model": "claude-opus-4-5", + "max_tokens": 16000, + "thinking": { + "type": "enabled", + "budget_tokens": 10000 + }, + "messages": [{ "role": "user", "content": "..." }] +} +``` + +The `budget_tokens` parameter sets a ceiling on how long Claude is allowed to think. A budget of 5,000 to 10,000 tokens covers most engineering tasks well. Higher budgets improve results on harder problems but consume more tokens, so calibrate based on task complexity. + +On Claude.ai, you can toggle extended thinking directly in the interface before sending a message. It's available on Pro and above. + +Where this is actually worth using: + +- Reviewing database schema designs for edge cases and data integrity issues +- Debugging recursive or async logic where the failure isn't immediately visible +- Evaluating whether to refactor a module or leave it alone +- Any decision where a confident-sounding wrong answer is worse than a slower right one + +Extended thinking isn't a general-purpose upgrade. It's a trade-off: more tokens, more latency, better reasoning for hard problems. Use it when accuracy matters more than speed. + +# Projects: stop rebuilding context from scratch every session + +If you open a new Claude chat every time you sit down to work, you're re-explaining your project from scratch every session. Claude doesn't carry any memory of your stack, your naming conventions, your team's architecture decisions, or what you worked on yesterday. + +**Projects** in Claude.ai fix this. A Project is a persistent workspace that holds: + +- **Custom instructions:** Your stack, preferred patterns, constraints, what to avoid +- **Uploaded files:** READMEs, API specs, database schemas, style guides, architecture docs +- **Conversation history:** All chats within a Project share the same base context + +In practice: you create a Project for a specific app or service. 
You upload the schema, the auth flow design, maybe a few key files. You write a brief system prompt: "We use TypeScript, Appwrite for the backend, React with functional components, no default exports." Every conversation inside that Project starts with all of that already loaded. + +The output quality difference is real. Instead of generic answers, Claude can catch inconsistencies against your existing patterns. Instead of guessing how you handle auth, it references what you've uploaded. + +Some setups that work well: + +- One Project per application with schema, environment notes, and deploy config +- A client Project with their stack, preferences, and naming conventions +- An interview prep Project with a target role's requirements and sample problems + +Projects are available on Claude Pro, Team, and Enterprise plans. If you're doing regular development work in Claude.ai, Projects are the highest-leverage change you can make to your workflow. + +# Prompt caching: cut costs and latency in your Claude API integrations + +If you build applications on top of the Claude API, this is the feature you're most likely missing and the one with the most direct impact on cost. + +**Prompt caching** lets you mark portions of your prompt as cacheable. On subsequent API calls that include the same cached content, Claude skips reprocessing it. The result: + +- Up to **90% reduction** in cost for cached input tokens +- Up to **85% reduction** in time to first token + +This matters when you have a large, stable system prompt. A detailed persona, a lengthy document, extensive instructions, or a big block of context that appears in every request. Without caching, Claude processes all of it on every API call. With caching, you pay full price once, then a fraction on every call after that. 
+ +To enable it, add `"cache_control": {"type": "ephemeral"}` to the content blocks you want cached: + +```json +{ + "model": "claude-opus-4-5", + "system": [ + { + "type": "text", + "text": "You are a backend assistant for Acme Corp. [large context block here]", + "cache_control": { "type": "ephemeral" } + } + ], + "messages": [{ "role": "user", "content": "..." }] +} +``` + +The cache has a 5-minute TTL that refreshes every time the cached block is hit. For an application with consistent traffic, the cache stays warm almost continuously. + +Where it's worth applying: + +- System prompts defining a persona or operating rules +- Static documents attached to every request (internal API specs, product docs) +- Long conversation histories you prepend for continuity +- Retrieval-augmented generation pipelines where retrieved context stays the same across similar queries + +For most production Claude API integrations, prompt caching typically pays for the implementation time within the first day of traffic. If you're paying meaningful money on Claude API costs and haven't enabled caching, that's the first thing to change. + +# Going deeper with Claude's advanced features + +Extended thinking, Projects, and prompt caching address different layers of the Claude experience: reasoning quality, workflow continuity, and API efficiency. Each one closes a real gap. Together, they shift how you work with Claude day to day. + +For extended thinking and prompt caching, the [Anthropic documentation](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) has complete implementation details and model-specific notes worth reading before you ship anything to production. + +If you want to go further and connect Claude directly to a live backend, the [Appwrite MCP server](/docs/tooling/mcp) lets Claude interact with your Appwrite project through natural language in Claude Code or Claude Desktop. 
You can create users, query collections, manage files, and trigger functions without writing a single line of glue code. It's a practical way to bring agentic workflows into a real application stack. + +# More resources + +- [Appwrite MCP server documentation](/docs/tooling/mcp) +- [What is MCP and why is it trending?](/blog/post/what-is-mcp) +- [Build AI-powered applications with Appwrite](/docs/products/ai) +- [Exploring AI and vibe coding: Insights from the Appwrite developer community](/blog/post/ai-vibe-coding-insights) diff --git a/src/routes/blog/post/top-7-prompts-every-developer-should-use-to-get-better-results/+page.markdoc b/src/routes/blog/post/top-7-prompts-every-developer-should-use-to-get-better-results/+page.markdoc new file mode 100644 index 0000000000..c6863322c6 --- /dev/null +++ b/src/routes/blog/post/top-7-prompts-every-developer-should-use-to-get-better-results/+page.markdoc @@ -0,0 +1,175 @@ +--- +layout: post +title: Top 7 prompts every developer should use to get better results +description: Using AI but not getting great results? These 7 prompts help developers get clearer, more accurate outputs and actually make AI useful day to day. +date: 2026-04-27 +cover: /images/blog/top-7-prompts-every-developer-should-use-to-get-better-results/cover.png +timeToRead: 5 +author: aishwari +category: ai +featured: false +unlisted: true +--- + +Most developers use AI tools the same way every day: paste some code, type "fix this", and hope for the best. The output is often generic, half-wrong, or requires three follow-ups to get to something actually useful. + +The problem usually isn't the model. It's the prompt. + +AI tools don't read your mind. They respond to what you give them. Vague inputs produce vague outputs. The developers getting consistently useful results aren't using different tools. They're asking better questions. + +Here are 7 prompts that actually work, with examples you can adapt right now. + +# 1. 
Set the role before giving the task + +Generic prompts get generic answers. If you want output tuned to your context, tell the model who it's acting as before you describe the task. + +**Pattern:** "You are a senior [role] with experience in [domain]. [Task]." + +**Example:** + +``` +You are a senior backend engineer reviewing production Node.js code. +The service handles 10,000 concurrent requests per minute. +Review the following function for performance bottlenecks and memory leaks. +[paste code] +``` + +This works because the model adjusts its reasoning lens. A "senior backend engineer" weighs trade-offs differently than a "helpful assistant." You're not just asking for output. You're establishing what good output looks like. + +# 2. Structure your debug requests + +"Why is this broken?" is the most common developer prompt. It's also the least useful. Without the error message, relevant code, and what you've already tried, the model is guessing. + +**Pattern:** "Here's the error: [error]. Here's the relevant code: [code]. I've already tried [X]. What's the root cause?" + +**Example:** + +``` +Error: Cannot read properties of undefined (reading 'map') +Stack trace: [paste trace] + +Here's the component throwing the error: +[paste code] + +I've already checked that the API returns data. The console log confirms it. +What's causing this and how do I fix it? +``` + +Giving the model the error, the code, and your existing debugging steps removes the obvious dead ends. You'll skip two rounds of "have you tried checking if the value is undefined" and get straight to the actual cause. + +# 3. Ask for trade-offs, not just a solution + +When you're making architecture decisions, asking "what's the best way to do X?" rarely gives you something actionable. The model picks one approach and defends it. What you actually need is a comparison. + +**Pattern:** "I need to [goal]. Give me 3 different approaches with trade-offs for each." 
+ +**Example:** + +``` +I need to implement real-time notifications in a web app. +Users need to receive updates when new records are added to a database. +Give me 3 different approaches with trade-offs for each. Consider latency, +infrastructure complexity, and cost at scale. +``` + +This forces the model to think in parallel instead of committing to a single path. You get the comparison you'd normally have to do yourself. Pair this with your specific constraints and you've turned a vague question into a decision framework. + +# 4. Write tests with coverage requirements + +"Write tests for this" produces happy-path tests and nothing else. If you want actual coverage, you have to specify what you want covered. + +**Pattern:** "Write unit tests for [function/module]. Cover: the happy path, edge cases [list them], and failure modes." + +**Example:** + +``` +Write unit tests for the following authentication function. +Cover: +- Happy path: valid credentials +- Edge cases: empty string inputs, null values, special characters in password +- Failure modes: expired token, incorrect password, user not found +Use Jest. Mock the database calls. +[paste function] +``` + +Listing the cases explicitly means you get tests for the cases that actually matter, not just the ones that are easy to write. You can also add "and tell me what you're not testing" to surface blind spots. + +# 5. Refactor with explicit constraints + +Open-ended refactor requests produce code that works differently than what you had. If you want cleaner code without behavior changes, say so directly. + +**Pattern:** "Refactor this for readability without changing behavior. Keep it idiomatic [language]. Do not change the function signature." + +**Example:** + +``` +Refactor this TypeScript function for readability. +Requirements: +- Do not change the function signature +- Do not change the return type +- Keep it idiomatic TypeScript. 
Use proper types, not any +- No external dependencies +[paste function] +``` + +Constraints prevent the model from over-engineering the solution. Without them, you're likely to get a refactor that introduces a new abstraction you didn't ask for or a signature change that breaks downstream callers. + +# 6. Ask for a security review with specific scope + +"Check this for security issues" produces a checklist of generic warnings. Scope the review and you get something you can actually act on. + +**Pattern:** "Review this [code/API/config] for security vulnerabilities. Focus on [injection, auth, data exposure, etc.]. For each issue, explain the risk and the fix." + +**Example:** + +``` +Review this Express.js route handler for security vulnerabilities. +Focus on: SQL injection, broken authentication, and sensitive data exposure. +For each issue you find: +- Explain what the vulnerability is +- Show where it exists in the code +- Provide the fixed version +[paste code] +``` + +Asking for the fix alongside the vulnerability means you're not just getting a report. You're getting a patch. The "show where it exists" part is important too. Without it, the model often gives you a generic warning that could apply to any codebase. + +# 7. Generate documentation with a real audience in mind + +"Document this" produces output that describes what the code does, not how to use it. Specify the audience and you get documentation that's actually useful. + +**Pattern:** "Write developer documentation for [API/function/module]. The audience is [who]. Include: [what to cover]." + +**Example:** + +``` +Write developer documentation for this REST API endpoint. +Audience: a developer integrating this API for the first time. +Include: +- What the endpoint does +- Request parameters and their types +- Response shape with an example +- Error codes and what they mean +- A complete curl example +[paste endpoint code or spec] +``` + +The audience constraint changes everything. 
"A developer integrating this for the first time" tells the model to favor clarity and examples over technical completeness. The explicit list of what to include means you get consistent structure across every endpoint you document. + +# The pattern behind all 7 + +Every prompt above does the same three things: it sets context, specifies the task clearly, and defines what good output looks like. Role, constraints, and coverage requirements are the levers you're pulling each time. + +The developers who get the most out of AI tools treat prompting as a skill, not a shortcut. It takes an extra 30 seconds to write a structured prompt. It saves you five minutes of follow-ups and one round of copy-pasting garbage output into a new conversation. + +For more prompting guidance, [top 7 prompts every developer should use to get better results](/blog/post/top-7-prompts-every-developer-should-use-to-get-better-results) is a good companion read. + +# Taking this further with Appwrite + +If you're building with AI assistants and using Appwrite as your backend, the [Appwrite MCP server](/docs/tooling/mcp) lets AI tools interact directly with your project. You can query collections, manage functions, and run operations through natural language inside tools like Claude Code, Cursor, or Windsurf. + +This means the prompts you've refined for code generation can also drive your backend. Instead of switching context between your AI tool and the Appwrite console, the model can do it directly. 
+ +- [Appwrite MCP server docs](/docs/tooling/mcp) +- [Claude Code integration guide](/docs/tooling/ai/ai-dev-tools/claude-code) +- [Top 7 prompts every developer should use to get better results](/blog/post/top-7-prompts-every-developer-should-use-to-get-better-results) diff --git a/static/images/blog/7-prompting-mistakes-you-need-to-stop-making-right-now/cover.png b/static/images/blog/7-prompting-mistakes-you-need-to-stop-making-right-now/cover.png new file mode 100644 index 0000000000..73bcecb463 Binary files /dev/null and b/static/images/blog/7-prompting-mistakes-you-need-to-stop-making-right-now/cover.png differ diff --git a/static/images/blog/7-things-claude-can-do-that-will-blow-your-mind/cover.png b/static/images/blog/7-things-claude-can-do-that-will-blow-your-mind/cover.png new file mode 100644 index 0000000000..5aaea580b1 Binary files /dev/null and b/static/images/blog/7-things-claude-can-do-that-will-blow-your-mind/cover.png differ diff --git a/static/images/blog/the-top-3-claude-features-you-are-probably-not-using/cover.png b/static/images/blog/the-top-3-claude-features-you-are-probably-not-using/cover.png new file mode 100644 index 0000000000..7dbd2ee7ff Binary files /dev/null and b/static/images/blog/the-top-3-claude-features-you-are-probably-not-using/cover.png differ diff --git a/static/images/blog/top-7-prompts-every-developer-should-use-to-get-better-results/cover.png b/static/images/blog/top-7-prompts-every-developer-should-use-to-get-better-results/cover.png new file mode 100644 index 0000000000..e740885f1a Binary files /dev/null and b/static/images/blog/top-7-prompts-every-developer-should-use-to-get-better-results/cover.png differ