# Building GPT-4 Powered Coding Assistants
GPT-4’s improved code understanding opens new possibilities for developer tooling. Here’s how to build effective coding assistants that go beyond simple autocomplete.
## Code Context Management
The key to useful code assistance is providing relevant context:
```python
from dataclasses import dataclass
from typing import Optional
import ast
import os


@dataclass
class CodeContext:
    current_file: str
    current_code: str
    cursor_position: tuple[int, int]  # line, column
    imports: list[str]
    class_context: Optional[str]
    function_context: Optional[str]
    related_files: list[str]


class ContextBuilder:
    """Build context for code assistance."""

    def __init__(self, project_root: str):
        self.project_root = project_root

    def build_context(
        self,
        file_path: str,
        code: str,
        cursor_line: int
    ) -> CodeContext:
        """Build comprehensive context for assistance."""
        # Parse the code
        try:
            tree = ast.parse(code)
        except SyntaxError:
            tree = None

        # Extract imports
        imports = self._extract_imports(tree) if tree else []

        # Find enclosing class/function
        class_ctx, func_ctx = (
            self._find_enclosing_scope(tree, cursor_line) if tree else (None, None)
        )

        # Find related files
        related = self._find_related_files(file_path, imports)

        return CodeContext(
            current_file=file_path,
            current_code=code,
            cursor_position=(cursor_line, 0),
            imports=imports,
            class_context=class_ctx,
            function_context=func_ctx,
            related_files=related
        )

    def _extract_imports(self, tree: ast.AST) -> list[str]:
        """Extract import statements."""
        imports = []
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    imports.append(alias.name)
            elif isinstance(node, ast.ImportFrom):
                module = node.module or ""
                for alias in node.names:
                    imports.append(f"{module}.{alias.name}")
        return imports

    def _find_enclosing_scope(
        self,
        tree: ast.AST,
        line: int
    ) -> tuple[Optional[str], Optional[str]]:
        """Find enclosing class and function."""
        class_name = None
        func_name = None
        for node in ast.walk(tree):
            if hasattr(node, 'lineno') and hasattr(node, 'end_lineno'):
                if node.lineno <= line <= (node.end_lineno or node.lineno):
                    if isinstance(node, ast.ClassDef):
                        class_name = node.name
                    elif isinstance(node, ast.FunctionDef):
                        func_name = node.name
        return class_name, func_name

    def _find_related_files(
        self,
        file_path: str,
        imports: list[str]
    ) -> list[str]:
        """Find related project files."""
        related = []
        for imp in imports:
            # Convert import to potential file path
            parts = imp.split('.')
            potential_path = os.path.join(self.project_root, *parts) + ".py"
            if os.path.exists(potential_path):
                related.append(potential_path)
        return related[:5]  # Limit to 5 files
```
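Before wiring this into a model call, it helps to sanity-check the context extraction on its own. Here is a quick hypothetical run; the file name and project root are placeholders, and the expected values are shown as comments:

```python
# Hypothetical usage of ContextBuilder on a small in-memory snippet.
sample = """import os

class Greeter:
    def greet(self, name: str) -> str:
        return f"Hello, {name}"
"""

builder = ContextBuilder(project_root=".")
ctx = builder.build_context("greeter.py", sample, cursor_line=4)

print(ctx.imports)           # ['os']
print(ctx.class_context)     # 'Greeter'
print(ctx.function_context)  # 'greet'
```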
## Code Completion Engine
````python
class GPT4CodeCompletion:
    """GPT-4 powered code completion."""

    def __init__(self, client):
        self.client = client
        self.context_builder = ContextBuilder(".")

    async def complete(
        self,
        file_path: str,
        code: str,
        cursor_line: int,
        cursor_col: int
    ) -> list[str]:
        """Generate code completions."""
        context = self.context_builder.build_context(file_path, code, cursor_line)

        # Get code before and after cursor
        lines = code.split('\n')
        before = '\n'.join(lines[:cursor_line])
        current_line = lines[cursor_line] if cursor_line < len(lines) else ""
        prefix = current_line[:cursor_col]

        prompt = f"""Complete the code at the cursor position.

File: {file_path}
Imports: {', '.join(context.imports[:10])}
{f'Class: {context.class_context}' if context.class_context else ''}
{f'Function: {context.function_context}' if context.function_context else ''}

Code before cursor:
```python
{before[-2000:]}
{prefix}
```

Provide 3 possible completions. Return ONLY the code to insert, no explanation.
Format as JSON: ["completion1", "completion2", "completion3"]"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a code completion engine. Return only valid Python code."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3
        )

        import json
        try:
            return json.loads(response.content)
        except json.JSONDecodeError:
            return [response.content]

    async def complete_function(
        self,
        signature: str,
        docstring: str = None,
        context: str = None
    ) -> str:
        """Generate complete function implementation."""
        prompt = f"""Implement this function.

Signature: {signature}
{f'Docstring: {docstring}' if docstring else ''}
{f'Context: {context}' if context else ''}

Requirements:
- Follow Python best practices
- Include type hints
- Handle edge cases
- Add inline comments for complex logic

Return only the function implementation."""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2
        )
        return response.content
````
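These classes assume a `client` object exposing an async `chat_completion(model, messages, temperature)` method that returns an object with a `.content` attribute. That wrapper is not defined in this post; below is one possible sketch over the official `openai` Python package (v1.x), which the hypothetical examples later on reuse. Adapt it to whatever client abstraction you already have.

```python
from dataclasses import dataclass
from typing import Optional

from openai import AsyncOpenAI  # pip install openai>=1.0


@dataclass
class ChatResult:
    """Thin wrapper so callers can read `.content` as in the classes above."""
    content: str


class GPT4Client:
    """Hypothetical adapter exposing the `chat_completion` interface used above."""

    def __init__(self, api_key: Optional[str] = None):
        # Falls back to the OPENAI_API_KEY environment variable when api_key is None.
        self._client = AsyncOpenAI(api_key=api_key)

    async def chat_completion(
        self,
        model: str,
        messages: list[dict],
        temperature: float = 0.0,
    ) -> ChatResult:
        response = await self._client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
        )
        return ChatResult(content=response.choices[0].message.content or "")
```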
## Code Explanation and Documentation
````python
class CodeExplainer:
    """Explain code using GPT-4."""

    def __init__(self, client):
        self.client = client

    async def explain_code(
        self,
        code: str,
        detail_level: str = "moderate"
    ) -> str:
        """Explain what code does."""
        detail_prompts = {
            "brief": "Explain in 2-3 sentences what this code does.",
            "moderate": "Explain this code, covering the main logic and any notable patterns.",
            "detailed": "Provide a detailed explanation including: purpose, algorithm, complexity, and potential improvements."
        }

        prompt = f"""{detail_prompts.get(detail_level, detail_prompts['moderate'])}

```python
{code}
```"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}]
        )
        return response.content

    async def generate_docstring(
        self,
        function_code: str,
        style: str = "google"
    ) -> str:
        """Generate docstring for function."""
        style_examples = {
            "google": '''"""Short description.

    Args:
        param1: Description.
        param2: Description.

    Returns:
        Description of return value.

    Raises:
        ExceptionType: When this happens.
    """''',
            "numpy": '''"""Short description.

    Parameters
    ----------
    param1 : type
        Description.

    Returns
    -------
    type
        Description.
    """''',
            "sphinx": '''"""Short description.

    :param param1: Description.
    :type param1: type
    :returns: Description.
    :rtype: type
    """'''
        }

        prompt = f"""Generate a docstring for this function in {style} style.

Function:
```python
{function_code}
```

Style example:
{style_examples.get(style, style_examples['google'])}

Return only the docstring."""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.1
        )
        return response.content
````
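A hypothetical call, assuming a client object like the `GPT4Client` adapter sketched earlier (any object with a matching async `chat_completion` method works):

```python
import asyncio


async def main() -> None:
    explainer = CodeExplainer(GPT4Client())
    snippet = "def fib(n):\n    return n if n < 2 else fib(n - 1) + fib(n - 2)"
    # "brief" keeps the explanation to a few sentences.
    print(await explainer.explain_code(snippet, detail_level="brief"))


asyncio.run(main())
```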
## Bug Detection and Fixing
````python
class BugDetector:
    """Detect and fix bugs with GPT-4."""

    def __init__(self, client):
        self.client = client

    async def analyze_for_bugs(self, code: str) -> dict:
        """Analyze code for potential bugs."""
        prompt = f"""Analyze this code for bugs, vulnerabilities, and issues.

```python
{code}
```

For each issue found, provide:
- Issue type (bug, security, performance, style)
- Severity (critical, high, medium, low)
- Line number(s)
- Description
- Suggested fix

Return as JSON:
{{
  "issues": [
    {{"type": "bug", "severity": "high", "lines": [10, 11], "description": "...", "fix": "..."}}
  ],
  "summary": "Overall assessment"
}}"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a code reviewer finding bugs."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.1
        )

        import json
        try:
            return json.loads(response.content)
        except json.JSONDecodeError:
            return {"raw": response.content}

    async def fix_bug(
        self,
        code: str,
        error_message: str,
        stack_trace: str = None
    ) -> dict:
        """Fix a bug given error information."""
        context = f"\nStack trace:\n{stack_trace}" if stack_trace else ""

        prompt = f"""Fix the bug in this code.

Code:
{code}

Error: {error_message}{context}

Provide:
- Root cause analysis
- Fixed code
- Explanation of the fix"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}]
        )
        return {
            "analysis": response.content,
            "model": "gpt-4"
        }
````
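A hypothetical flow that captures a real exception and hands it to `fix_bug`, again assuming a client object like the `GPT4Client` adapter sketched earlier:

```python
import asyncio
import traceback


def average(values: list[float]) -> float:
    return sum(values) / len(values)


async def main() -> None:
    detector = BugDetector(GPT4Client())
    try:
        average([])  # Triggers ZeroDivisionError on an empty list.
    except ZeroDivisionError as exc:
        result = await detector.fix_bug(
            code="def average(values):\n    return sum(values) / len(values)",
            error_message=str(exc),
            stack_trace=traceback.format_exc(),
        )
        print(result["analysis"])


asyncio.run(main())
```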
## Test Generation
````python
class TestGenerator:
    """Generate tests with GPT-4."""

    def __init__(self, client):
        self.client = client

    async def generate_unit_tests(
        self,
        function_code: str,
        framework: str = "pytest"
    ) -> str:
        """Generate unit tests for a function."""
        prompt = f"""Generate comprehensive unit tests for this function.

Function:
```python
{function_code}
```

Requirements:
- Use {framework}
- Cover happy path
- Cover edge cases (empty input, None, boundaries)
- Cover error cases
- Use descriptive test names
- Include setup/teardown if needed

Return only the test code."""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2
        )
        return response.content

    async def generate_test_cases(
        self,
        function_signature: str,
        docstring: str
    ) -> list[dict]:
        """Generate test case ideas."""
        prompt = f"""Generate test case ideas for this function.

Signature: {function_signature}
Documentation: {docstring}

Return as JSON array:
[
  {{"name": "test name", "input": "...", "expected": "...", "type": "happy_path|edge_case|error_case"}}
]"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.3
        )

        import json
        try:
            return json.loads(response.content)
        except json.JSONDecodeError:
            return []
````
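A hypothetical call that writes generated pytest tests to disk (assuming the `GPT4Client` adapter sketched earlier; the function and file name are illustrative):

```python
import asyncio
from pathlib import Path


async def main() -> None:
    generator = TestGenerator(GPT4Client())
    source = "def slugify(title: str) -> str:\n    return title.lower().replace(' ', '-')"
    tests = await generator.generate_unit_tests(source, framework="pytest")
    # Review before committing: generated tests can encode wrong expectations.
    Path("test_slugify.py").write_text(tests)


asyncio.run(main())
```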
## Refactoring Assistant
````python
class RefactoringAssistant:
    """Code refactoring with GPT-4."""

    def __init__(self, client):
        self.client = client

    async def suggest_refactoring(self, code: str) -> dict:
        """Suggest refactoring improvements."""
        prompt = f"""Analyze this code and suggest refactoring improvements.

```python
{code}
```

Consider:
- Code duplication
- Function length (should be < 20 lines)
- Single responsibility
- Naming clarity
- Design patterns that could help
- Performance optimizations

For each suggestion, provide:
- What to change
- Why it improves the code
- Example of the refactored version"""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}]
        )
        return {"suggestions": response.content}

    async def refactor(self, code: str, refactoring_type: str) -> str:
        """Apply specific refactoring."""
        refactoring_prompts = {
            "extract_function": "Extract repeated code into reusable functions",
            "rename": "Improve variable and function names for clarity",
            "simplify": "Simplify complex conditionals and loops",
            "typing": "Add comprehensive type hints",
            "modernize": "Use modern Python features (f-strings, walrus, etc.)"
        }
        instruction = refactoring_prompts.get(refactoring_type, "Improve code quality")

        prompt = f"""Refactor this code: {instruction}

{code}

Return only the refactored code."""

        response = await self.client.chat_completion(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.1
        )
        return response.content
````
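A hypothetical call that applies a targeted refactoring pass (again assuming the `GPT4Client` adapter sketched earlier):

```python
import asyncio


async def main() -> None:
    assistant = RefactoringAssistant(GPT4Client())
    legacy = (
        "def total(xs):\n"
        "    t = 0\n"
        "    for x in xs:\n"
        "        t = t + x\n"
        "    return t"
    )
    # "modernize" maps to the prompt asking for modern Python features.
    print(await assistant.refactor(legacy, refactoring_type="modernize"))


asyncio.run(main())
```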
## Integration Example
```python
class CodingAssistant:
    """Unified coding assistant."""

    def __init__(self, client):
        self.completer = GPT4CodeCompletion(client)
        self.explainer = CodeExplainer(client)
        self.bug_detector = BugDetector(client)
        self.test_generator = TestGenerator(client)
        self.refactorer = RefactoringAssistant(client)

    async def assist(
        self,
        code: str,
        action: str,
        **kwargs
    ) -> dict:
        """Route to appropriate assistant."""
        actions = {
            "complete": self.completer.complete,
            "explain": self.explainer.explain_code,
            "document": self.explainer.generate_docstring,
            "bugs": self.bug_detector.analyze_for_bugs,
            "fix": self.bug_detector.fix_bug,
            "test": self.test_generator.generate_unit_tests,
            "refactor": self.refactorer.suggest_refactoring,
        }

        handler = actions.get(action)
        if not handler:
            return {"error": f"Unknown action: {action}"}

        result = await handler(code, **kwargs)
        return {"action": action, "result": result}
```
GPT-4 transforms what’s possible in developer tooling. Better reasoning and a longer context window enable assistance that goes well beyond autocomplete: explanation, bug detection, test generation, and refactoring from a single interface.