feature: v0.1 rust rules

This commit is contained in:
Tyr Chen
2025-05-31 15:08:09 -07:00
parent 1b2a3a4c02
commit 98e3bb8f48
19 changed files with 4288 additions and 142 deletions

View File

@@ -911,7 +911,7 @@ chrono = { workspace = true }
uuid = { workspace = true }
# Domain-specific dependencies
bcrypt = "0.15"
argon2 = "0.5"
validator = { version = "0.18", features = ["derive"] }
regex = { workspace = true }

View File

@@ -0,0 +1,798 @@
---
description:
globs:
alwaysApply: false
---
# 🎨 RUST API DESIGN BEST PRACTICES
> **TL;DR:** Comprehensive API design guidelines for creating ergonomic, maintainable, and idiomatic Rust libraries and services.
## 🔍 API DESIGN STRATEGY
```mermaid
graph TD
Start["API Design"] --> Purpose{"API<br>Purpose?"}
Purpose -->|Library| LibAPI["Library API"]
Purpose -->|Service| ServiceAPI["Service API"]
Purpose -->|CLI| CLIAPI["CLI API"]
LibAPI --> Ergonomics["Ergonomic Design"]
ServiceAPI --> RESTDesign["REST/gRPC Design"]
CLIAPI --> CLIDesign["Command Interface"]
Ergonomics --> FlexibleInputs["Flexible Input Types"]
Ergonomics --> BuilderPattern["Builder Pattern"]
Ergonomics --> ErrorDesign["Error Design"]
RESTDesign --> OpenAPI["OpenAPI Documentation"]
RESTDesign --> Validation["Input Validation"]
RESTDesign --> Authentication["Authentication"]
CLIDesign --> Subcommands["Subcommand Structure"]
CLIDesign --> Configuration["Configuration Management"]
CLIDesign --> HelpSystem["Help System"]
FlexibleInputs --> TraitDesign["Trait Design"]
BuilderPattern --> TraitDesign
ErrorDesign --> TraitDesign
OpenAPI --> AsyncAPI["Async API Patterns"]
Validation --> AsyncAPI
Authentication --> AsyncAPI
Subcommands --> Testing["Testing Strategy"]
Configuration --> Testing
HelpSystem --> Testing
TraitDesign --> Documentation["Documentation"]
AsyncAPI --> Documentation
Testing --> Documentation
Documentation --> APIComplete["API Complete"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style Ergonomics fill:#4dbb5f,stroke:#36873f,color:white
style RESTDesign fill:#ffa64d,stroke:#cc7a30,color:white
style CLIDesign fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 API DESIGN PRINCIPLES
### Ergonomic Function Signatures
```rust
use std::path::Path;
// ✅ Accept flexible input types
pub fn read_config<P: AsRef<Path>>(path: P) -> Result<Config, ConfigError> {
let path = path.as_ref();
// Implementation
}
// ✅ Use Into for string-like parameters
pub fn create_user(name: impl Into<String>, email: impl Into<String>) -> Result<User, UserError> {
let name = name.into();
let email = email.into();
// Implementation
}
// ✅ Prefer borrowing over ownership when possible
pub fn validate_email(email: &str) -> Result<(), ValidationError> {
// Implementation - doesn't need to own the string
}
// ✅ Return owned data when caller needs ownership
pub fn generate_token() -> String {
// Implementation returns owned String
}
// ❌ Avoid overly generic signatures without clear benefit
// pub fn process<T, U, F>(input: T, func: F) -> U where F: Fn(T) -> U
```
### Builder Pattern Implementation
```rust
use std::{collections::HashMap, time::Duration};
use typed_builder::TypedBuilder;
// ✅ Use TypedBuilder for complex configuration
#[derive(Debug, TypedBuilder)]
pub struct HttpClient {
#[builder(setter(into))]
base_url: String,
#[builder(default = Duration::from_secs(30))]
timeout: Duration,
#[builder(default)]
headers: HashMap<String, String>,
#[builder(default, setter(strip_option))]
proxy: Option<String>,
#[builder(default = false)]
verify_ssl: bool,
}
impl HttpClient {
// ✅ Provide a simple constructor for common cases
pub fn new<S: Into<String>>(base_url: S) -> Self {
Self::builder()
.base_url(base_url)
.build()
}
// ✅ Provide convenient factory methods
pub fn with_auth<S: Into<String>>(base_url: S, token: S) -> Self {
let mut headers = HashMap::new();
headers.insert("Authorization".to_string(), format!("Bearer {}", token.into()));
Self::builder()
.base_url(base_url)
.headers(headers)
.build()
}
}
// ✅ Usage examples
let client = HttpClient::new("https://api.example.com");
let authenticated_client = HttpClient::builder()
.base_url("https://api.example.com")
.timeout(Duration::from_secs(60))
.verify_ssl(true)
.build();
```
### Error Handling Design
```rust
use std::time::Duration;
use thiserror::Error;
// ✅ Well-structured error hierarchy
#[derive(Error, Debug)]
pub enum ApiError {
#[error("Network error: {source}")]
Network {
#[from]
source: reqwest::Error,
},
#[error("Invalid request: {message}")]
InvalidRequest { message: String },
#[error("Authentication failed")]
Authentication,
#[error("Resource not found: {resource_type} with id {id}")]
NotFound {
resource_type: String,
id: String,
},
#[error("Rate limit exceeded: retry after {retry_after} seconds")]
RateLimit { retry_after: u64 },
#[error("Server error: {status_code}")]
Server { status_code: u16 },
}
impl ApiError {
// ✅ Provide utility methods for error classification
pub fn is_retryable(&self) -> bool {
match self {
ApiError::Network { .. } | ApiError::RateLimit { .. } => true,
ApiError::Server { status_code } => *status_code >= 500,
_ => false,
}
}
pub fn retry_after(&self) -> Option<Duration> {
match self {
ApiError::RateLimit { retry_after } => Some(Duration::from_secs(*retry_after)),
_ => None,
}
}
}
// ✅ Domain-specific result type
pub type ApiResult<T> = Result<T, ApiError>;
```
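A sketch of how a caller might use these classification helpers to drive a bounded retry loop. The generic `op` closure and the `tokio::time::sleep` call are assumptions for illustration, not part of the API above:
```rust
use std::future::Future;
use std::time::Duration;

pub async fn with_retries<T, F, Fut>(mut op: F, max_attempts: u32) -> ApiResult<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = ApiResult<T>>,
{
    let mut attempt = 0;
    loop {
        match op().await {
            Ok(value) => return Ok(value),
            // Retry only errors the API marks as transient, up to the attempt budget
            Err(err) if err.is_retryable() && attempt + 1 < max_attempts => {
                let backoff = Duration::from_millis(100 * 2u64.pow(attempt));
                tokio::time::sleep(err.retry_after().unwrap_or(backoff)).await;
                attempt += 1;
            }
            Err(err) => return Err(err),
        }
    }
}
```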
## 🔄 TRAIT DESIGN PATTERNS
### Cohesive Trait Design
```rust
// ✅ Single responsibility traits
pub trait Serializable {
fn serialize(&self) -> Result<Vec<u8>, SerializationError>;
fn deserialize(data: &[u8]) -> Result<Self, SerializationError>
where
Self: Sized;
}
pub trait Cacheable {
type Key;
fn cache_key(&self) -> Self::Key;
fn cache_ttl(&self) -> Option<Duration>;
}
// ✅ Composable traits
pub trait Repository<T> {
type Error;
type Id;
async fn find_by_id(&self, id: Self::Id) -> Result<Option<T>, Self::Error>;
async fn save(&self, entity: &T) -> Result<T, Self::Error>;
async fn delete(&self, id: Self::Id) -> Result<bool, Self::Error>;
}
pub trait Queryable<T>: Repository<T> {
type Query;
type Page;
async fn find_by_query(&self, query: Self::Query) -> Result<Vec<T>, Self::Error>;
async fn find_paginated(&self, query: Self::Query, page: Self::Page) -> Result<(Vec<T>, bool), Self::Error>;
}
// ✅ Default implementations for common patterns
pub trait Timestamped {
fn created_at(&self) -> DateTime<Utc>;
fn updated_at(&self) -> DateTime<Utc>;
// Default implementation for age calculation
fn age(&self) -> Duration {
Utc::now().signed_duration_since(self.created_at()).to_std().unwrap_or_default()
}
}
```
### Extension Traits
```rust
// ✅ Extension traits for external types
pub trait StringExtensions {
fn is_valid_email(&self) -> bool;
fn to_snake_case(&self) -> String;
fn truncate_with_ellipsis(&self, max_len: usize) -> String;
}
impl StringExtensions for str {
fn is_valid_email(&self) -> bool {
// Email validation logic
self.contains('@') && self.contains('.')
}
fn to_snake_case(&self) -> String {
// Snake case conversion
self.chars()
.map(|c| if c.is_uppercase() { format!("_{}", c.to_lowercase()) } else { c.to_string() })
.collect::<String>()
.trim_start_matches('_')
.to_string()
}
fn truncate_with_ellipsis(&self, max_len: usize) -> String {
if self.len() <= max_len {
self.to_string()
} else {
format!("{}...", &self[..max_len.saturating_sub(3)])
}
}
}
// ✅ Extension traits for Result types
pub trait ResultExtensions<T, E> {
fn log_error(self) -> Self;
fn with_context<F>(self, f: F) -> Result<T, ContextError<E>>
where
F: FnOnce() -> String;
}
impl<T, E: std::fmt::Debug> ResultExtensions<T, E> for Result<T, E> {
fn log_error(self) -> Self {
if let Err(ref e) = self {
tracing::error!("Operation failed: {:?}", e);
}
self
}
fn with_context<F>(self, f: F) -> Result<T, ContextError<E>>
where
F: FnOnce() -> String,
{
self.map_err(|e| ContextError {
context: f(),
source: e,
})
}
}
```
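A brief usage sketch of the extension traits above; `load_settings()` is a hypothetical fallible operation:
```rust
assert_eq!("UserProfile".to_snake_case(), "user_profile");
assert!("alice@example.com".is_valid_email());

let settings = load_settings()
    .log_error()
    .with_context(|| "failed to load settings".to_string())?;
```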
## 📦 MODULE ORGANIZATION
### Public API Structure
```rust
// lib.rs - Main library entry point
//! # MyLibrary
//!
//! A comprehensive library for handling X, Y, and Z.
//!
//! ## Quick Start
//!
//! ```rust
//! use my_library::Client;
//!
//! let client = Client::new("api-key");
//! let result = client.fetch_data().await?;
//! ```
//!
//! ## Features
//!
//! - Feature A: Enable with `features = ["feature-a"]`
//! - Feature B: Enable with `features = ["feature-b"]`
// Re-export main public API
pub use client::Client;
pub use config::Config;
pub use error::{Error, Result};
// Re-export important types
pub use types::{User, Product, Order};
// Module declarations
mod client;
mod config;
mod error;
mod types;
// Internal modules (not re-exported)
mod internal {
pub mod auth;
pub mod http;
pub mod serialization;
}
// Prelude module for convenient imports
pub mod prelude {
pub use crate::{Client, Config, Error, Result};
pub use crate::types::*;
}
// Feature-gated modules
#[cfg(feature = "async")]
pub mod async_client;
#[cfg(feature = "blocking")]
pub mod blocking_client;
```
### Documentation Standards
```rust
/// A client for interacting with the Example API.
///
/// The `Client` provides methods for authentication, data retrieval,
/// and resource management. It handles rate limiting, retries, and
/// error handling automatically.
///
/// # Examples
///
/// Basic usage:
///
/// ```rust
/// use my_library::Client;
///
/// # tokio_test::block_on(async {
/// let client = Client::new("your-api-key");
/// let users = client.list_users().await?;
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// # });
/// ```
///
/// With custom configuration:
///
/// ```rust
/// use my_library::{Client, Config};
/// use std::time::Duration;
///
/// let config = Config::builder()
/// .timeout(Duration::from_secs(30))
/// .retry_attempts(3)
/// .build();
///
/// let client = Client::with_config("your-api-key", config);
/// ```
pub struct Client {
api_key: String,
config: Config,
http_client: reqwest::Client,
}
impl Client {
/// Creates a new client with the given API key.
///
/// Uses default configuration with reasonable timeouts and retry settings.
///
/// # Arguments
///
/// * `api_key` - Your API key for authentication
///
/// # Examples
///
/// ```rust
/// use my_library::Client;
///
/// let client = Client::new("sk-1234567890abcdef");
/// ```
pub fn new<S: Into<String>>(api_key: S) -> Self {
Self::with_config(api_key, Config::default())
}
/// Creates a new client with custom configuration.
///
/// # Arguments
///
/// * `api_key` - Your API key for authentication
/// * `config` - Custom configuration settings
///
/// # Examples
///
/// ```rust
/// use my_library::{Client, Config};
/// use std::time::Duration;
///
/// let config = Config::builder()
/// .timeout(Duration::from_secs(60))
/// .build();
///
/// let client = Client::with_config("api-key", config);
/// ```
pub fn with_config<S: Into<String>>(api_key: S, config: Config) -> Self {
// Implementation
}
/// Retrieves a list of users.
///
/// # Returns
///
/// A `Result` containing a vector of `User` objects on success,
/// or an `Error` on failure.
///
/// # Errors
///
/// This function will return an error if:
///
/// * The API key is invalid (`Error::Authentication`)
/// * The request times out (`Error::Network`)
/// * The server returns an error (`Error::Server`)
///
/// # Examples
///
/// ```rust
/// # use my_library::{Client, Error};
/// # tokio_test::block_on(async {
/// let client = Client::new("api-key");
///
/// match client.list_users().await {
/// Ok(users) => println!("Found {} users", users.len()),
/// Err(Error::Authentication) => eprintln!("Invalid API key"),
/// Err(e) => eprintln!("Request failed: {}", e),
/// }
/// # });
/// ```
pub async fn list_users(&self) -> Result<Vec<User>, Error> {
// Implementation
}
}
```
## 🔧 CONFIGURATION PATTERNS
### Layered Configuration
```rust
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::time::Duration;
use typed_builder::TypedBuilder;
// ✅ Configuration with multiple sources
#[derive(Debug, Clone, Serialize, Deserialize, TypedBuilder)]
pub struct Config {
// Server settings
#[builder(default = "127.0.0.1".to_string(), setter(into))]
pub host: String,
#[builder(default = 8080)]
pub port: u16,
// API settings
#[builder(default = Duration::from_secs(30))]
pub timeout: Duration,
#[builder(default = 3)]
pub retry_attempts: u32,
// Feature flags
#[builder(default = true)]
pub enable_metrics: bool,
#[builder(default = false)]
pub debug_mode: bool,
}
impl Config {
/// Load configuration from multiple sources with precedence:
/// 1. Environment variables (highest priority)
/// 2. Configuration file
/// 3. Defaults (lowest priority)
pub fn load() -> Result<Self, ConfigError> {
let mut config = Self::default();
// Load from file if it exists
if let Ok(file_config) = Self::from_file("config.toml") {
config = config.merge(file_config);
}
// Override with environment variables
config = config.merge(Self::from_env()?);
Ok(config)
}
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, ConfigError> {
let content = std::fs::read_to_string(path)
.map_err(ConfigError::FileRead)?;
toml::from_str(&content)
.map_err(ConfigError::ParseError)
}
pub fn from_env() -> Result<Self, ConfigError> {
// Override fields on a default config; the typed builder's type changes
// with every setter call, so it cannot be reassigned in a loop of `if`s
let mut config = Self::default();
if let Ok(host) = std::env::var("HOST") {
config.host = host;
}
if let Ok(port) = std::env::var("PORT") {
config.port = port.parse()
.map_err(|_| ConfigError::InvalidPort)?;
}
if let Ok(timeout) = std::env::var("TIMEOUT_SECONDS") {
let seconds = timeout.parse()
.map_err(|_| ConfigError::InvalidTimeout)?;
config.timeout = Duration::from_secs(seconds);
}
Ok(config)
}
fn merge(self, other: Self) -> Self {
// Merge logic - other takes precedence
Self {
host: if other.host != "127.0.0.1" { other.host } else { self.host },
port: if other.port != 8080 { other.port } else { self.port },
timeout: if other.timeout != Duration::from_secs(30) { other.timeout } else { self.timeout },
retry_attempts: if other.retry_attempts != 3 { other.retry_attempts } else { self.retry_attempts },
enable_metrics: other.enable_metrics, // Boolean fields always take the other value
debug_mode: other.debug_mode,
}
}
}
impl Default for Config {
fn default() -> Self {
Self::builder().build()
}
}
```
## 🎭 ASYNC API PATTERNS
### Async Iterator and Stream Design
```rust
use futures::Stream;
use std::pin::Pin;
// ✅ Async iterator for paginated results
pub struct PaginatedStream<T> {
client: Arc<Client>,
query: Query,
current_page: Option<String>,
buffer: VecDeque<T>,
exhausted: bool,
}
impl<T> PaginatedStream<T> {
pub fn new(client: Arc<Client>, query: Query) -> Self {
Self {
client,
query,
current_page: None,
buffer: VecDeque::new(),
exhausted: false,
}
}
}
impl<T: Unpin> Stream for PaginatedStream<T>
where
T: for<'de> Deserialize<'de> + Send + 'static,
{
type Item = Result<T, ApiError>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
if let Some(item) = self.buffer.pop_front() {
return Poll::Ready(Some(Ok(item)));
}
if self.exhausted {
return Poll::Ready(None);
}
// Fetch next page
let client = self.client.clone();
let query = self.query.clone();
let page = self.current_page.clone();
let future = async move {
client.fetch_page(query, page).await
};
// Poll the future and handle the result
// Implementation depends on your async runtime
todo!("Implement polling logic")
}
}
// ✅ Cancellation-aware async operations
pub struct CancellableOperation<T> {
inner: Pin<Box<dyn Future<Output = Result<T, ApiError>> + Send>>,
cancel_token: CancelToken,
}
impl<T> CancellableOperation<T> {
pub fn new<F>(future: F, cancel_token: CancelToken) -> Self
where
F: Future<Output = Result<T, ApiError>> + Send + 'static,
{
Self {
inner: Box::pin(future),
cancel_token,
}
}
}
impl<T> Future for CancellableOperation<T> {
type Output = Result<T, ApiError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.cancel_token.is_cancelled() {
return Poll::Ready(Err(ApiError::Cancelled));
}
self.inner.as_mut().poll(cx)
}
}
```
## 🔍 TESTING API DESIGN
### Testable API Structure
```rust
// ✅ Dependency injection for testability
pub trait HttpClientTrait: Send + Sync {
async fn get(&self, url: &str) -> Result<Response, HttpError>;
async fn post(&self, url: &str, body: Vec<u8>) -> Result<Response, HttpError>;
}
pub struct Client<H: HttpClientTrait> {
http_client: H,
config: Config,
}
impl<H: HttpClientTrait> Client<H> {
pub fn new(http_client: H, config: Config) -> Self {
Self { http_client, config }
}
pub async fn fetch_user(&self, id: &str) -> Result<User, ApiError> {
let url = format!("{}/users/{}", self.config.base_url, id);
let response = self.http_client.get(&url).await?;
// Parse response
todo!()
}
}
// ✅ Production implementation
impl HttpClientTrait for reqwest::Client {
async fn get(&self, url: &str) -> Result<Response, HttpError> {
// Implementation
}
async fn post(&self, url: &str, body: Vec<u8>) -> Result<Response, HttpError> {
// Implementation
}
}
// ✅ Mock implementation for testing
#[cfg(test)]
pub struct MockHttpClient {
responses: HashMap<String, Result<Response, HttpError>>,
}
#[cfg(test)]
impl MockHttpClient {
pub fn new() -> Self {
Self {
responses: HashMap::new(),
}
}
pub fn expect_get(&mut self, url: &str, response: Result<Response, HttpError>) {
self.responses.insert(format!("GET {}", url), response);
}
}
#[cfg(test)]
impl HttpClientTrait for MockHttpClient {
async fn get(&self, url: &str) -> Result<Response, HttpError> {
self.responses
.get(&format!("GET {}", url))
.cloned()
.unwrap_or(Err(HttpError::NotFound))
}
async fn post(&self, url: &str, _body: Vec<u8>) -> Result<Response, HttpError> {
self.responses
.get(&format!("POST {}", url))
.cloned()
.unwrap_or(Err(HttpError::NotFound))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_fetch_user_success() {
let mut mock_client = MockHttpClient::new();
mock_client.expect_get(
"https://api.example.com/users/123",
Ok(Response {
status: 200,
body: r#"{"id": "123", "name": "John"}"#.to_string(),
}),
);
let client = Client::new(mock_client, Config::default());
let user = client.fetch_user("123").await.unwrap();
assert_eq!(user.id, "123");
assert_eq!(user.name, "John");
}
}
```
## ✅ API DESIGN CHECKLIST
```markdown
### API Design Implementation Verification
- [ ] Function signatures accept flexible input types (AsRef, Into)
- [ ] Error types are well-structured with proper context
- [ ] Builder pattern used for complex configuration
- [ ] Traits have single responsibility and clear contracts
- [ ] Public API is well-documented with examples
- [ ] Configuration supports multiple sources with precedence
- [ ] Async APIs handle cancellation and backpressure
- [ ] Dependencies are injected for testability
- [ ] Extension traits enhance existing types ergonomically
- [ ] Module organization follows convention
- [ ] Feature gates are used appropriately
- [ ] Error handling provides actionable information
- [ ] API follows Rust naming conventions
- [ ] Generic parameters have appropriate bounds
- [ ] Public API surface is minimal but complete
```
This API design guide ensures consistent, ergonomic, and maintainable interfaces across Rust projects.

View File

@@ -22,124 +22,6 @@ alwaysApply: false
- **Production-ready code**: All code must be deployable and maintainable
- **No `unwrap()` or `expect()`** in production code - use proper error handling
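For example, a minimal sketch of the preferred style; the `PortError` type and `load_port` helper are illustrative, not part of any workspace crate:
```rust
use std::num::ParseIntError;

#[derive(Debug, thiserror::Error)]
pub enum PortError {
    #[error("PORT is not set")]
    Missing,
    #[error("PORT is not a valid number: {0}")]
    Invalid(#[from] ParseIntError),
}

// ❌ Panics on missing or malformed input
// let port: u16 = std::env::var("PORT").unwrap().parse().unwrap();

// ✅ Propagates a typed error instead of panicking
pub fn load_port() -> Result<u16, PortError> {
    let raw = std::env::var("PORT").map_err(|_| PortError::Missing)?;
    Ok(raw.parse()?)
}
```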
## 📦 DEPENDENCY MANAGEMENT
### Workspace Dependencies Priority
```toml
# Always prefer workspace dependencies first
[dependencies]
tokio = { workspace = true }
serde = { workspace = true, features = ["derive"] }
# Only add new dependencies if not available in workspace
# Request permission before modifying Cargo.toml
```
### Standard Crate Recommendations
When adding new dependencies, prefer these battle-tested crates:
```toml
# Core utilities
anyhow = "1.0" # Error handling
thiserror = "2.0" # Error type definitions
derive_more = { version = "2", features = ["full"] } # Extended derive macros
typed-builder = "0.21" # Builder pattern
# Async/Concurrency
tokio = { version = "1.45", features = [
"macros",
"rt-multi-thread",
"signal",
"sync"
] }
async-trait = "0.1" # Async traits
futures = "0.3" # Async utilities
dashmap = { version = "6", features = ["serde"] } # Concurrent HashMap
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
base64 = "0.22"
# Web/HTTP
axum = { version = "0.8", features = ["macros", "http2"] }
reqwest = { version = "0.12", default-features = false, features = [
"charset",
"rustls-tls-webpki-roots",
"http2",
"json",
"cookies",
"gzip",
"brotli",
"zstd",
"deflate"
] }
tower = { version = "0.5", features = ["util"] }
tower-http = { version = "0.6", features = ["cors", "trace"] }
http = "1"
# Database
sqlx = { version = "0.8", features = [
"chrono",
"postgres",
"runtime-tokio-rustls",
"sqlite",
"time",
"uuid"
] }
# Documentation/API
utoipa = { version = "5", features = ["axum_extras"] }
utoipa-axum = { version = "0.2" }
utoipa-swagger-ui = { version = "9", features = [
"axum",
"vendored"
], default-features = false }
schemars = { version = "0.8", features = ["chrono", "url"] }
# Time/Date
chrono = { version = "0.4", features = ["serde"] }
time = { version = "0.3", features = ["serde"] }
# Templating/Text Processing
minijinja = { version = "2", features = [
"json",
"loader",
"loop_controls",
"speedups"
] }
regex = "1"
htmd = "0.2" # HTML to Markdown
# Authentication/Security
jsonwebtoken = "9.0"
uuid = { version = "1.17", features = ["v4", "serde"] }
# Data Processing
jsonpath-rust = "1"
url = "2.5"
# CLI (when needed)
clap = { version = "4.0", features = ["derive"] }
# Utilities
rand = "0.8"
getrandom = "0.3"
atomic_enum = "0.3" # Atomic enumerations
# Logging/Observability
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
```
### Version Strategy
- **Always use latest versions** when adding new dependencies
- **Request permission** before modifying `Cargo.toml`
- **Check workspace first** - never duplicate dependencies unnecessarily
- **Use specific feature flags** to minimize compilation time and binary size
- **Prefer rustls over openssl** for TLS (better for cross-compilation)
## 🏗️ CODE STRUCTURE PATTERNS
### Data Structure Organization
@@ -332,19 +214,6 @@ pub struct DatabaseManagerImpl;
// Any file > 500 lines (excluding tests) needs refactoring
```
### Dependency Anti-Patterns
```rust
// ❌ Don't duplicate workspace dependencies
[dependencies]
tokio = "1.0" # Already in workspace
// ❌ Don't modify Cargo.toml without permission
# Always ask before adding new dependencies
// ❌ Don't use outdated versions
serde = "0.9" # Use latest stable
```
## ✅ QUALITY CHECKLIST
```markdown
@@ -362,7 +231,6 @@ serde = "0.9" # Use latest stable
- [ ] `cargo test` passes
- [ ] `cargo clippy` passes with no warnings
- [ ] Public APIs documented with examples
- [ ] Workspace dependencies used when available
```
This code quality standard ensures consistent, maintainable, and production-ready Rust code across all projects.

View File

@@ -0,0 +1,325 @@
---
description:
globs:
alwaysApply: false
---
# 📦 RUST DEPENDENCY MANAGEMENT
> **TL;DR:** Centralized dependency management guidelines for consistent, secure, and maintainable Rust projects.
## 🔍 DEPENDENCY MANAGEMENT STRATEGY
```mermaid
graph TD
Start["Project Setup"] --> WorkspaceCheck{"Workspace<br>Project?"}
WorkspaceCheck -->|Yes| WorkspaceRoot["Use Workspace Dependencies"]
WorkspaceCheck -->|No| SingleCrate["Single Crate Dependencies"]
WorkspaceRoot --> WorkspaceTable["[workspace.dependencies]<br>Define versions centrally"]
SingleCrate --> DirectDeps["[dependencies]<br>Direct version specification"]
WorkspaceTable --> CrateUsage["[dependencies]<br>crate = { workspace = true }"]
CrateUsage --> SecurityCheck["Security Assessment"]
DirectDeps --> SecurityCheck
SecurityCheck --> Audit["cargo audit"]
Audit --> Outdated["cargo outdated"]
Outdated --> VersionPin["Pin Critical Versions"]
VersionPin --> FeatureGates["Feature Gate Optional Deps"]
FeatureGates --> Testing["Testing Dependencies"]
Testing --> Documentation["Document Choices"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style WorkspaceRoot fill:#4dbb5f,stroke:#36873f,color:white
style SingleCrate fill:#ffa64d,stroke:#cc7a30,color:white
style SecurityCheck fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 DEPENDENCY STRATEGY
### Workspace Dependencies Priority
```toml
# Always prefer workspace dependencies first
[dependencies]
tokio = { workspace = true }
serde = { workspace = true, features = ["derive"] }
# Only add new dependencies if not available in workspace
# Request permission before modifying Cargo.toml
```
## 📋 STANDARD CRATE RECOMMENDATIONS
### Core Utilities
```toml
# Error handling
anyhow = "1.0" # Simple error handling
thiserror = "2.0" # Structured error types
derive_more = { version = "2", features = ["full"] } # Extended derive macros
# Data structures
typed-builder = "0.21" # Builder pattern
uuid = { version = "1.17", features = ["v4", "v7", "serde"] }
chrono = { version = "0.4", features = ["serde"] }
time = { version = "0.3", features = ["serde"] }
```
### Async/Concurrency
```toml
tokio = { version = "1.45", features = [
"macros",
"rt-multi-thread",
"signal",
"sync",
"fs",
"net",
"time"
] }
async-trait = "0.1" # Async traits
futures = "0.3" # Async utilities
dashmap = { version = "6", features = ["serde"] } # Concurrent HashMap
```
### Serialization
```toml
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
base64 = "0.22"
```
### Web/HTTP
```toml
axum = { version = "0.8", features = ["macros", "http2", "multipart"] }
reqwest = { version = "0.12", default-features = false, features = [
"charset",
"rustls-tls-webpki-roots",
"http2",
"json",
"cookies",
"gzip",
"brotli",
"zstd",
"deflate"
] }
tower = { version = "0.5", features = ["util", "timeout", "load-shed"] }
tower-http = { version = "0.6", features = ["cors", "trace", "compression"] }
http = "1.0"
```
### Database
```toml
sqlx = { version = "0.8", features = [
"chrono",
"postgres",
"runtime-tokio-rustls",
"sqlite",
"time",
"uuid",
"json"
] }
```
### Documentation/API
```toml
utoipa = { version = "5", features = ["axum_extras", "chrono", "uuid"] }
utoipa-axum = "0.2"
utoipa-swagger-ui = { version = "9", features = [
"axum",
"vendored"
], default-features = false }
schemars = { version = "0.8", features = ["chrono", "url"] }
```
### CLI Applications
```toml
clap = { version = "4.0", features = ["derive", "env", "unicode"] }
dialoguer = "0.11" # Interactive prompts
indicatif = "0.17" # Progress bars
colored = "2.0" # Terminal colors
console = "0.15" # Terminal utilities
```
### gRPC/Protobuf
```toml
tonic = { version = "0.13", features = ["transport", "codegen", "prost"] }
prost = "0.13"
prost-types = "0.13"
tonic-build = "0.13"
prost-build = "0.13"
tonic-health = "0.13"
tonic-reflection = "0.13"
```
### Development/Testing
```toml
[dev-dependencies]
tempfile = "3.0" # Temporary files
wiremock = "0.6" # HTTP mocking
assert_cmd = "2.0" # CLI testing
predicates = "3.0" # Test assertions
axum-test = "15.0" # Axum testing
tokio-test = "0.4" # Tokio testing utilities
```
## 🔧 FEATURE FLAG STRATEGY
### Minimal Feature Sets
```toml
# ✅ Good: Only enable needed features
reqwest = { version = "0.12", default-features = false, features = [
"rustls-tls-webpki-roots", # TLS support
"json", # JSON serialization
"gzip" # Compression
] }
# ❌ Bad: Enabling all features
# reqwest = { version = "0.12", features = ["full"] }
```
### Feature Documentation
```toml
# Document why each feature is needed
tokio = { version = "1.45", features = [
"macros", # #[tokio::main] and #[tokio::test]
"rt-multi-thread", # Multi-threaded runtime
"signal", # Signal handling for graceful shutdown
"net", # Network primitives
"fs", # File system operations
"time" # Time utilities
] }
```
## 🔒 SECURITY CONSIDERATIONS
### TLS Configuration
```toml
# ✅ Prefer rustls over openssl
reqwest = { version = "0.12", default-features = false, features = [
"rustls-tls-webpki-roots" # Use rustls with web PKI roots
] }
# ❌ Avoid native-tls when possible
# reqwest = { version = "0.12", features = ["native-tls"] }
```
### Crypto Dependencies
```toml
# Use well-established crypto crates
rand = { version = "0.8", features = ["std_rng"] }
getrandom = { version = "0.3", features = ["std"] }
jsonwebtoken = "9.0"
argon2 = "0.15"
```
## 📊 VERSION STRATEGY
### Version Selection Rules
1. **Always use latest stable versions** for new dependencies
2. **Use semantic versioning** - prefer `"1.0"` over `"=1.0.0"`
3. **Check workspace first** - never duplicate dependencies
4. **Document breaking changes** when updating major versions
### Workspace Version Management
```toml
# workspace Cargo.toml
[workspace.dependencies]
tokio = { version = "1.45", features = ["macros", "rt-multi-thread"] }
serde = { version = "1.0", features = ["derive"] }
anyhow = "1.0"
thiserror = "2.0"
uuid = { version = "1.17", features = ["v4", "serde"] }
# Individual crate Cargo.toml
[dependencies]
tokio = { workspace = true, features = ["signal"] } # Add extra features as needed
serde = { workspace = true }
anyhow = { workspace = true }
```
## 🚨 DEPENDENCY ANTI-PATTERNS
### What to Avoid
```toml
# ❌ Don't duplicate workspace dependencies
[dependencies]
tokio = "1.0" # Already in workspace
# ❌ Don't enable unnecessary features
tokio = { version = "1.45", features = ["full"] } # Too broad
# ❌ Don't use outdated versions
serde = "0.9" # Use latest stable
# ❌ Don't mix TLS implementations
reqwest = { version = "0.12", features = ["native-tls", "rustls-tls"] }
# ❌ Don't use git dependencies in production
my-crate = { git = "https://github.com/user/repo" }
```
### Common Mistakes
```rust
// ❌ Don't import with wildcard
use serde::*;
// ✅ Import specific items
use serde::{Deserialize, Serialize};
// ❌ Don't use deprecated APIs
use std::sync::ONCE_INIT; // Deprecated
// ✅ Use modern alternatives
use std::sync::Once;
```
## 📝 DEPENDENCY AUDIT
### Regular Maintenance
```bash
# Check for outdated dependencies
cargo outdated
# Audit for security vulnerabilities
cargo audit
# Check for unused dependencies
cargo machete
# Update dependencies
cargo update
```
### Security Best Practices
```toml
# Pin security-critical dependencies
openssl = "=0.10.64" # Pin exact version for security
# Use cargo-deny for policy enforcement
[advisories]
db-path = "~/.cargo/advisory-db"
db-urls = ["https://github.com/rustsec/advisory-db"]
vulnerability = "deny"
unmaintained = "warn"
```
## ✅ DEPENDENCY CHECKLIST
```markdown
### Dependency Management Verification
- [ ] Uses workspace dependencies when available
- [ ] Feature flags are minimal and documented
- [ ] Prefers rustls over native-tls
- [ ] Uses latest stable versions
- [ ] Security-critical deps are audited
- [ ] No duplicate dependencies across workspace
- [ ] Dev dependencies separated from runtime deps
- [ ] Feature documentation explains necessity
- [ ] Regular dependency updates scheduled
- [ ] Vulnerability scanning enabled
```
This dependency management guide ensures consistent, secure, and maintainable dependency choices across all Rust projects.

View File

@@ -0,0 +1,873 @@
---
description:
globs:
alwaysApply: false
---
# 🎭 RUST DESIGN PATTERNS
> **TL;DR:** Essential design patterns for Rust applications, focusing on idiomatic solutions that leverage Rust's ownership system and zero-cost abstractions.
## 🔍 DESIGN PATTERN SELECTION STRATEGY
```mermaid
graph TD
Start["Design Challenge"] --> ProblemType{"Problem<br>Category?"}
ProblemType -->|Object Creation| Creational["Creational Patterns"]
ProblemType -->|Object Behavior| Behavioral["Behavioral Patterns"]
ProblemType -->|Object Structure| Structural["Structural Patterns"]
ProblemType -->|Concurrency| ConcurrencyP["Concurrency Patterns"]
Creational --> BuilderCheck{"Complex<br>Configuration?"}
Creational --> FactoryCheck{"Multiple<br>Implementations?"}
BuilderCheck -->|Yes| TypeStateBuilder["Type-State Builder"]
FactoryCheck -->|Yes| AbstractFactory["Abstract Factory"]
Behavioral --> StrategyCheck{"Runtime Algorithm<br>Selection?"}
Behavioral --> CommandCheck{"Undo/Redo<br>Required?"}
Behavioral --> ObserverCheck{"Event-Driven<br>Architecture?"}
StrategyCheck -->|Yes| StrategyPattern["Strategy Pattern"]
CommandCheck -->|Yes| CommandPattern["Command Pattern"]
ObserverCheck -->|Yes| ObserverPattern["Observer Pattern"]
Structural --> AdapterCheck{"External API<br>Integration?"}
Structural --> DecoratorCheck{"Cross-Cutting<br>Concerns?"}
AdapterCheck -->|Yes| AdapterPattern["Adapter Pattern"]
DecoratorCheck -->|Yes| DecoratorPattern["Decorator Pattern"]
ConcurrencyP --> ActorCheck{"Isolated State<br>Management?"}
ConcurrencyP --> PipelineCheck{"Data Pipeline<br>Processing?"}
ActorCheck -->|Yes| ActorPattern["Actor Pattern"]
PipelineCheck -->|Yes| PipelinePattern["Pipeline Pattern"]
TypeStateBuilder --> Implementation["Implementation"]
AbstractFactory --> Implementation
StrategyPattern --> Implementation
CommandPattern --> Implementation
ObserverPattern --> Implementation
AdapterPattern --> Implementation
DecoratorPattern --> Implementation
ActorPattern --> Implementation
PipelinePattern --> Implementation
style Start fill:#4da6ff,stroke:#0066cc,color:white
style Creational fill:#4dbb5f,stroke:#36873f,color:white
style Behavioral fill:#ffa64d,stroke:#cc7a30,color:white
style Structural fill:#d94dbb,stroke:#a3378a,color:white
style ConcurrencyP fill:#9d4dbb,stroke:#7a3a8a,color:white
```
## 🏗️ CREATIONAL PATTERNS
### Builder Pattern with Type State
```rust
use std::marker::PhantomData;
// ✅ Type-safe builder preventing invalid configurations
pub struct DatabaseConfigBuilder<HasHost, HasPort, HasDatabase> {
host: Option<String>,
port: Option<u16>,
database: Option<String>,
username: Option<String>,
password: Option<String>,
_marker: PhantomData<(HasHost, HasPort, HasDatabase)>,
}
pub struct Missing;
pub struct Present;
impl DatabaseConfigBuilder<Missing, Missing, Missing> {
pub fn new() -> Self {
Self {
host: None,
port: None,
database: None,
username: None,
password: None,
_marker: PhantomData,
}
}
}
impl<HasPort, HasDatabase> DatabaseConfigBuilder<Missing, HasPort, HasDatabase> {
pub fn host(self, host: impl Into<String>) -> DatabaseConfigBuilder<Present, HasPort, HasDatabase> {
DatabaseConfigBuilder {
host: Some(host.into()),
port: self.port,
database: self.database,
username: self.username,
password: self.password,
_marker: PhantomData,
}
}
}
impl<HasHost, HasDatabase> DatabaseConfigBuilder<HasHost, Missing, HasDatabase> {
pub fn port(self, port: u16) -> DatabaseConfigBuilder<HasHost, Present, HasDatabase> {
DatabaseConfigBuilder {
host: self.host,
port: Some(port),
database: self.database,
username: self.username,
password: self.password,
_marker: PhantomData,
}
}
}
impl<HasHost, HasPort> DatabaseConfigBuilder<HasHost, HasPort, Missing> {
pub fn database(self, database: impl Into<String>) -> DatabaseConfigBuilder<HasHost, HasPort, Present> {
DatabaseConfigBuilder {
host: self.host,
port: self.port,
database: Some(database.into()),
username: self.username,
password: self.password,
_marker: PhantomData,
}
}
}
impl<HasHost, HasPort, HasDatabase> DatabaseConfigBuilder<HasHost, HasPort, HasDatabase> {
pub fn username(mut self, username: impl Into<String>) -> Self {
self.username = Some(username.into());
self
}
pub fn password(mut self, password: impl Into<String>) -> Self {
self.password = Some(password.into());
self
}
}
// Only allow building when all required fields are present
impl DatabaseConfigBuilder<Present, Present, Present> {
pub fn build(self) -> DatabaseConfig {
DatabaseConfig {
host: self.host.unwrap(),
port: self.port.unwrap(),
database: self.database.unwrap(),
username: self.username,
password: self.password,
}
}
}
// ✅ Usage - compiler enforces required fields
let config = DatabaseConfigBuilder::new()
.host("localhost")
.port(5432)
.database("myapp")
.username("admin")
.build();
```
### Factory Pattern with Associated Types
```rust
// ✅ Factory pattern for creating different database connections
pub trait ConnectionFactory {
type Connection;
type Config;
type Error;
fn create_connection(config: Self::Config) -> Result<Self::Connection, Self::Error>;
fn connection_type() -> &'static str;
}
pub struct PostgresFactory;
pub struct SqliteFactory;
impl ConnectionFactory for PostgresFactory {
type Connection = sqlx::PgPool;
type Config = PostgresConfig;
type Error = sqlx::Error;
fn create_connection(config: Self::Config) -> Result<Self::Connection, Self::Error> {
// Implementation
}
fn connection_type() -> &'static str {
"PostgreSQL"
}
}
impl ConnectionFactory for SqliteFactory {
type Connection = sqlx::SqlitePool;
type Config = SqliteConfig;
type Error = sqlx::Error;
fn create_connection(config: Self::Config) -> Result<Self::Connection, Self::Error> {
// Implementation
}
fn connection_type() -> &'static str {
"SQLite"
}
}
// ✅ Generic database service using factory
pub struct DatabaseService<F: ConnectionFactory> {
connection: F::Connection,
_factory: PhantomData<F>,
}
impl<F: ConnectionFactory> DatabaseService<F> {
pub fn new(config: F::Config) -> Result<Self, F::Error> {
let connection = F::create_connection(config)?;
Ok(Self {
connection,
_factory: PhantomData,
})
}
pub fn connection_info(&self) -> &'static str {
F::connection_type()
}
}
```
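A minimal usage sketch for the generic service above; how the `PostgresConfig` value is constructed is illustrative:
```rust
let config = PostgresConfig::default(); // hypothetical: build the config however your app does
let service = DatabaseService::<PostgresFactory>::new(config)?;
println!("Connected via {}", service.connection_info());
```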
## 🔄 BEHAVIORAL PATTERNS
### Strategy Pattern with Enums
```rust
// ✅ Strategy pattern for different authentication methods
#[derive(Debug, Clone)]
pub enum AuthStrategy {
Bearer { token: String },
ApiKey { key: String, header: String },
Basic { username: String, password: String },
OAuth2 { client_id: String, client_secret: String },
}
impl AuthStrategy {
pub fn apply_to_request(&self, request: &mut Request) -> Result<(), AuthError> {
match self {
AuthStrategy::Bearer { token } => {
request.headers_mut().insert(
"Authorization",
format!("Bearer {}", token).parse().unwrap(),
);
}
AuthStrategy::ApiKey { key, header } => {
request.headers_mut().insert(
header.as_str(),
key.parse().unwrap(),
);
}
AuthStrategy::Basic { username, password } => {
use base64::Engine as _;
let encoded = base64::engine::general_purpose::STANDARD
.encode(format!("{}:{}", username, password));
request.headers_mut().insert(
"Authorization",
format!("Basic {}", encoded).parse().unwrap(),
);
}
AuthStrategy::OAuth2 { client_id, client_secret } => {
// OAuth2 implementation
self.handle_oauth2(request, client_id, client_secret)?;
}
}
Ok(())
}
fn handle_oauth2(&self, request: &mut Request, client_id: &str, client_secret: &str) -> Result<(), AuthError> {
// OAuth2 token exchange logic
todo!()
}
}
// ✅ Context that uses the strategy
pub struct HttpClient {
client: reqwest::Client,
auth_strategy: Option<AuthStrategy>,
}
impl HttpClient {
pub fn new() -> Self {
Self {
client: reqwest::Client::new(),
auth_strategy: None,
}
}
pub fn with_auth(mut self, strategy: AuthStrategy) -> Self {
self.auth_strategy = Some(strategy);
self
}
pub async fn request(&self, url: &str) -> Result<Response, HttpError> {
let mut request = self.client.get(url).build()?;
if let Some(ref auth) = self.auth_strategy {
auth.apply_to_request(&mut request)?;
}
let response = self.client.execute(request).await?;
Ok(response)
}
}
```
### Command Pattern with Undo
```rust
// ✅ Command pattern for operations with undo capability
pub trait Command {
type Error;
fn execute(&mut self) -> Result<(), Self::Error>;
fn undo(&mut self) -> Result<(), Self::Error>;
fn description(&self) -> &str;
}
#[derive(Debug)]
pub struct CreateUserCommand {
user_service: Arc<UserService>,
user_data: User,
created_user_id: Option<UserId>,
}
impl CreateUserCommand {
pub fn new(user_service: Arc<UserService>, user_data: User) -> Self {
Self {
user_service,
user_data,
created_user_id: None,
}
}
}
impl Command for CreateUserCommand {
type Error = UserServiceError;
fn execute(&mut self) -> Result<(), Self::Error> {
let user = self.user_service.create_user(&self.user_data)?;
self.created_user_id = Some(user.id);
Ok(())
}
fn undo(&mut self) -> Result<(), Self::Error> {
if let Some(user_id) = self.created_user_id.take() {
self.user_service.delete_user(user_id)?;
}
Ok(())
}
fn description(&self) -> &str {
"Create user"
}
}
// ✅ Command invoker with history
pub struct CommandHistory {
executed_commands: Vec<Box<dyn Command<Error = Box<dyn std::error::Error>>>>,
current_position: usize,
}
impl CommandHistory {
pub fn new() -> Self {
Self {
executed_commands: Vec::new(),
current_position: 0,
}
}
pub fn execute<C>(&mut self, mut command: C) -> Result<(), Box<dyn std::error::Error>>
where
C: Command<Error = Box<dyn std::error::Error>> + 'static,
{
command.execute()?;
// Remove any commands after current position (when redoing after undo)
self.executed_commands.truncate(self.current_position);
// Add the new command
self.executed_commands.push(Box::new(command));
self.current_position += 1;
Ok(())
}
pub fn undo(&mut self) -> Result<(), Box<dyn std::error::Error>> {
if self.current_position > 0 {
self.current_position -= 1;
self.executed_commands[self.current_position].undo()?;
}
Ok(())
}
pub fn redo(&mut self) -> Result<(), Box<dyn std::error::Error>> {
if self.current_position < self.executed_commands.len() {
self.executed_commands[self.current_position].execute()?;
self.current_position += 1;
}
Ok(())
}
}
```
### Observer Pattern with Async
```rust
use tokio::sync::broadcast;
// ✅ Event-driven observer pattern with async support
#[derive(Debug, Clone)]
pub enum DomainEvent {
UserCreated { user_id: UserId, email: String },
UserUpdated { user_id: UserId, changes: Vec<String> },
UserDeleted { user_id: UserId },
OrderPlaced { order_id: OrderId, user_id: UserId, amount: Decimal },
}
#[async_trait::async_trait]
pub trait EventHandler {
async fn handle(&self, event: &DomainEvent) -> Result<(), EventError>;
fn interested_in(&self) -> Vec<std::mem::Discriminant<DomainEvent>>;
}
pub struct EmailNotificationHandler {
email_service: Arc<EmailService>,
}
#[async_trait::async_trait]
impl EventHandler for EmailNotificationHandler {
async fn handle(&self, event: &DomainEvent) -> Result<(), EventError> {
match event {
DomainEvent::UserCreated { email, .. } => {
self.email_service.send_welcome_email(email).await?;
}
DomainEvent::OrderPlaced { user_id, amount, .. } => {
let user = self.get_user(*user_id).await?;
self.email_service.send_order_confirmation(&user.email, *amount).await?;
}
_ => {}
}
Ok(())
}
fn interested_in(&self) -> Vec<std::mem::Discriminant<DomainEvent>> {
vec![
std::mem::discriminant(&DomainEvent::UserCreated { user_id: UserId::new(), email: String::new() }),
std::mem::discriminant(&DomainEvent::OrderPlaced {
order_id: OrderId::new(),
user_id: UserId::new(),
amount: Decimal::ZERO
}),
]
}
}
// ✅ Event bus for managing observers
pub struct EventBus {
sender: broadcast::Sender<DomainEvent>,
handlers: Vec<Arc<dyn EventHandler + Send + Sync>>,
}
impl EventBus {
pub fn new() -> Self {
let (sender, _) = broadcast::channel(1000);
Self {
sender,
handlers: Vec::new(),
}
}
pub fn subscribe(&mut self, handler: Arc<dyn EventHandler + Send + Sync>) {
self.handlers.push(handler);
}
pub async fn publish(&self, event: DomainEvent) -> Result<(), EventError> {
// Send to broadcast channel for other subscribers
let _ = self.sender.send(event.clone());
// Handle with registered handlers
for handler in &self.handlers {
let event_discriminant = std::mem::discriminant(&event);
if handler.interested_in().contains(&event_discriminant) {
if let Err(e) = handler.handle(&event).await {
tracing::error!("Event handler failed: {:?}", e);
// Continue with other handlers
}
}
}
Ok(())
}
pub fn subscribe_to_stream(&self) -> broadcast::Receiver<DomainEvent> {
self.sender.subscribe()
}
}
```
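A wiring sketch for the bus above; the `email_handler` value is assumed to be an `Arc<EmailNotificationHandler>` built elsewhere:
```rust
let mut bus = EventBus::new();
bus.subscribe(email_handler); // Arc<EmailNotificationHandler> coerces to Arc<dyn EventHandler + Send + Sync>

bus.publish(DomainEvent::UserCreated {
    user_id: UserId::new(),
    email: "alice@example.com".to_string(),
})
.await?;
```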
## 🏛️ STRUCTURAL PATTERNS
### Adapter Pattern for External APIs
```rust
// ✅ Adapter pattern for integrating different payment providers
#[async_trait::async_trait]
pub trait PaymentProcessor {
async fn process_payment(&self, payment: &Payment) -> Result<PaymentResult, PaymentError>;
async fn refund_payment(&self, payment_id: &str, amount: Option<Decimal>) -> Result<RefundResult, PaymentError>;
}
// External Stripe API (different interface)
pub struct StripeClient {
// Stripe-specific implementation
}
impl StripeClient {
pub async fn charge(&self, amount_cents: u64, token: &str) -> Result<StripeCharge, StripeError> {
// Stripe-specific charge logic
}
pub async fn create_refund(&self, charge_id: &str, amount_cents: Option<u64>) -> Result<StripeRefund, StripeError> {
// Stripe-specific refund logic
}
}
// ✅ Adapter to make Stripe compatible with our interface
pub struct StripeAdapter {
client: StripeClient,
}
impl StripeAdapter {
pub fn new(client: StripeClient) -> Self {
Self { client }
}
}
#[async_trait::async_trait]
impl PaymentProcessor for StripeAdapter {
async fn process_payment(&self, payment: &Payment) -> Result<PaymentResult, PaymentError> {
let amount_cents = (payment.amount * 100).to_u64().ok_or(PaymentError::InvalidAmount)?;
let charge = self.client
.charge(amount_cents, &payment.token)
.await
.map_err(|e| PaymentError::ProviderError(e.to_string()))?;
Ok(PaymentResult {
id: charge.id,
status: match charge.status.as_str() {
"succeeded" => PaymentStatus::Completed,
"pending" => PaymentStatus::Pending,
"failed" => PaymentStatus::Failed,
_ => PaymentStatus::Unknown,
},
amount: payment.amount,
fees: charge.fees.map(|f| Decimal::from(f) / 100),
})
}
async fn refund_payment(&self, payment_id: &str, amount: Option<Decimal>) -> Result<RefundResult, PaymentError> {
let amount_cents = amount.map(|a| (a * 100).to_u64().unwrap());
let refund = self.client
.create_refund(payment_id, amount_cents)
.await
.map_err(|e| PaymentError::ProviderError(e.to_string()))?;
Ok(RefundResult {
id: refund.id,
amount: Decimal::from(refund.amount) / 100,
status: RefundStatus::Completed,
})
}
}
// ✅ Similar adapter for PayPal
pub struct PayPalAdapter {
client: PayPalClient,
}
#[async_trait::async_trait]
impl PaymentProcessor for PayPalAdapter {
async fn process_payment(&self, payment: &Payment) -> Result<PaymentResult, PaymentError> {
// PayPal-specific implementation
}
async fn refund_payment(&self, payment_id: &str, amount: Option<Decimal>) -> Result<RefundResult, PaymentError> {
// PayPal-specific implementation
}
}
// ✅ Payment service using any adapter
pub struct PaymentService {
processor: Arc<dyn PaymentProcessor + Send + Sync>,
}
impl PaymentService {
pub fn new(processor: Arc<dyn PaymentProcessor + Send + Sync>) -> Self {
Self { processor }
}
pub async fn charge_customer(&self, payment: Payment) -> Result<PaymentResult, PaymentError> {
self.processor.process_payment(&payment).await
}
}
```
### Decorator Pattern with Middleware
```rust
// ✅ Decorator pattern for HTTP middleware
#[async_trait::async_trait]
pub trait HttpHandler {
async fn handle(&self, request: Request) -> Result<Response, HttpError>;
}
// Base handler
pub struct BaseHandler;
#[async_trait::async_trait]
impl HttpHandler for BaseHandler {
async fn handle(&self, request: Request) -> Result<Response, HttpError> {
// Basic request handling
Ok(Response::new("Hello World".into()))
}
}
// ✅ Logging decorator
pub struct LoggingDecorator<H: HttpHandler> {
inner: H,
}
impl<H: HttpHandler> LoggingDecorator<H> {
pub fn new(inner: H) -> Self {
Self { inner }
}
}
#[async_trait::async_trait]
impl<H: HttpHandler + Send + Sync> HttpHandler for LoggingDecorator<H> {
async fn handle(&self, request: Request) -> Result<Response, HttpError> {
let start = std::time::Instant::now();
let method = request.method().clone();
let uri = request.uri().clone();
tracing::info!("Incoming request: {} {}", method, uri);
let result = self.inner.handle(request).await;
let duration = start.elapsed();
match &result {
Ok(response) => {
tracing::info!(
"Request completed: {} {} -> {} in {:?}",
method, uri, response.status(), duration
);
}
Err(e) => {
tracing::error!(
"Request failed: {} {} -> {:?} in {:?}",
method, uri, e, duration
);
}
}
result
}
}
// ✅ Rate limiting decorator
pub struct RateLimitDecorator<H: HttpHandler> {
inner: H,
rate_limiter: Arc<RateLimiter>,
}
impl<H: HttpHandler> RateLimitDecorator<H> {
pub fn new(inner: H, rate_limiter: Arc<RateLimiter>) -> Self {
Self { inner, rate_limiter }
}
}
#[async_trait::async_trait]
impl<H: HttpHandler + Send + Sync> HttpHandler for RateLimitDecorator<H> {
async fn handle(&self, request: Request) -> Result<Response, HttpError> {
let client_ip = extract_client_ip(&request)?;
self.rate_limiter.check_rate_limit(&client_ip).await
.map_err(|_| HttpError::RateLimited)?;
self.inner.handle(request).await
}
}
// ✅ Composition of decorators
let handler = RateLimitDecorator::new(
LoggingDecorator::new(
BaseHandler
),
rate_limiter
);
```
## 🧵 CONCURRENCY PATTERNS
### Actor Pattern with Tokio
```rust
use tokio::sync::{mpsc, oneshot};
// ✅ Actor pattern for managing state with message passing
#[derive(Debug)]
pub enum UserActorMessage {
GetUser {
user_id: UserId,
respond_to: oneshot::Sender<Result<User, UserError>>,
},
UpdateUser {
user_id: UserId,
updates: UserUpdates,
respond_to: oneshot::Sender<Result<User, UserError>>,
},
DeleteUser {
user_id: UserId,
respond_to: oneshot::Sender<Result<(), UserError>>,
},
}
pub struct UserActor {
receiver: mpsc::Receiver<UserActorMessage>,
repository: Arc<dyn UserRepository + Send + Sync>,
cache: DashMap<UserId, User>,
}
impl UserActor {
pub fn new(repository: Arc<dyn UserRepository + Send + Sync>) -> (Self, UserActorHandle) {
let (sender, receiver) = mpsc::channel(100);
let actor = Self {
receiver,
repository,
cache: DashMap::new(),
};
let handle = UserActorHandle { sender };
(actor, handle)
}
pub async fn run(mut self) {
while let Some(msg) = self.receiver.recv().await {
self.handle_message(msg).await;
}
}
async fn handle_message(&mut self, msg: UserActorMessage) {
match msg {
UserActorMessage::GetUser { user_id, respond_to } => {
let result = self.get_user_internal(user_id).await;
let _ = respond_to.send(result);
}
UserActorMessage::UpdateUser { user_id, updates, respond_to } => {
let result = self.update_user_internal(user_id, updates).await;
let _ = respond_to.send(result);
}
UserActorMessage::DeleteUser { user_id, respond_to } => {
let result = self.delete_user_internal(user_id).await;
let _ = respond_to.send(result);
}
}
}
async fn get_user_internal(&self, user_id: UserId) -> Result<User, UserError> {
// Check cache first
if let Some(user) = self.cache.get(&user_id) {
return Ok(user.clone());
}
// Fetch from repository
let user = self.repository.find_by_id(user_id).await?
.ok_or(UserError::NotFound { user_id })?;
// Cache the result
self.cache.insert(user_id, user.clone());
Ok(user)
}
async fn update_user_internal(&mut self, user_id: UserId, updates: UserUpdates) -> Result<User, UserError> {
let updated_user = self.repository.update(user_id, updates).await?;
// Update cache
self.cache.insert(user_id, updated_user.clone());
Ok(updated_user)
}
async fn delete_user_internal(&mut self, user_id: UserId) -> Result<(), UserError> {
self.repository.delete(user_id).await?;
// Remove from cache
self.cache.remove(&user_id);
Ok(())
}
}
// ✅ Handle for communicating with the actor
#[derive(Clone)]
pub struct UserActorHandle {
sender: mpsc::Sender<UserActorMessage>,
}
impl UserActorHandle {
pub async fn get_user(&self, user_id: UserId) -> Result<User, UserError> {
let (respond_to, response) = oneshot::channel();
self.sender
.send(UserActorMessage::GetUser { user_id, respond_to })
.await
.map_err(|_| UserError::ActorUnavailable)?;
response.await
.map_err(|_| UserError::ActorUnavailable)?
}
pub async fn update_user(&self, user_id: UserId, updates: UserUpdates) -> Result<User, UserError> {
let (respond_to, response) = oneshot::channel();
self.sender
.send(UserActorMessage::UpdateUser { user_id, updates, respond_to })
.await
.map_err(|_| UserError::ActorUnavailable)?;
response.await
.map_err(|_| UserError::ActorUnavailable)?
}
}
// ✅ Starting the actor system
pub async fn start_user_actor(repository: Arc<dyn UserRepository + Send + Sync>) -> UserActorHandle {
let (actor, handle) = UserActor::new(repository);
tokio::spawn(async move {
actor.run().await;
});
handle
}
```
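### Pipeline Pattern with Channels
The pattern-selection flowchart above routes data-pipeline processing to the Pipeline pattern. A minimal sketch using bounded `mpsc` channels between stages; the stage functions here (string length as a stand-in for real work) are purely illustrative:
```rust
use tokio::sync::mpsc;

// ✅ Each stage owns its input receiver and forwards results downstream;
// dropping a sender closes the channel and lets the next stage finish cleanly
pub async fn run_pipeline(inputs: Vec<String>) -> Vec<usize> {
    let (raw_tx, mut raw_rx) = mpsc::channel::<String>(64);
    let (len_tx, mut len_rx) = mpsc::channel::<usize>(64);

    // Stage 1: feed raw inputs
    tokio::spawn(async move {
        for item in inputs {
            if raw_tx.send(item).await.is_err() {
                break;
            }
        }
    });

    // Stage 2: transform each item (placeholder computation)
    tokio::spawn(async move {
        while let Some(item) = raw_rx.recv().await {
            if len_tx.send(item.trim().len()).await.is_err() {
                break;
            }
        }
    });

    // Final stage: collect results
    let mut results = Vec::new();
    while let Some(len) = len_rx.recv().await {
        results.push(len);
    }
    results
}
```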
## ✅ DESIGN PATTERNS CHECKLIST
```markdown
### Design Patterns Implementation Verification
- [ ] Builder pattern used for complex configuration objects
- [ ] Factory pattern for creating related object families
- [ ] Strategy pattern for runtime algorithm selection
- [ ] Command pattern for operations requiring undo/redo
- [ ] Observer pattern for event-driven architecture
- [ ] Adapter pattern for external API integration
- [ ] Decorator pattern for cross-cutting concerns
- [ ] Actor pattern for concurrent state management
- [ ] Type-state pattern for compile-time validation
- [ ] Repository pattern for data access abstraction
- [ ] Dependency injection for testability
- [ ] Event sourcing for audit trails (when applicable)
- [ ] CQRS separation for read/write operations (when applicable)
- [ ] Circuit breaker for resilience patterns
- [ ] Retry pattern with exponential backoff
```
This design patterns guide provides battle-tested solutions for common architectural challenges in Rust applications.

View File

@@ -0,0 +1,638 @@
---
description:
globs:
alwaysApply: false
---
# ⚡ RUST PERFORMANCE OPTIMIZATION
> **TL;DR:** Performance optimization strategies for Rust applications, focusing on zero-cost abstractions, memory management, and profiling-driven optimization.
## 🔍 PERFORMANCE OPTIMIZATION STRATEGY
```mermaid
graph TD
Start["Performance Issue"] --> Measure["Profile & Measure"]
Measure --> Bottleneck{"Bottleneck<br>Type?"}
Bottleneck -->|CPU| CPUOpt["CPU Optimization"]
Bottleneck -->|Memory| MemOpt["Memory Optimization"]
Bottleneck -->|I/O| IOOpt["I/O Optimization"]
Bottleneck -->|Concurrency| ConcOpt["Concurrency Optimization"]
CPUOpt --> SIMD["SIMD Vectorization"]
CPUOpt --> Algorithms["Algorithm Optimization"]
CPUOpt --> CompileTime["Compile-Time Optimization"]
MemOpt --> Allocation["Allocation Strategy"]
MemOpt --> DataStructure["Data Structure Choice"]
MemOpt --> Caching["Caching Patterns"]
IOOpt --> Buffering["Buffering Strategy"]
IOOpt --> AsyncIO["Async I/O Patterns"]
IOOpt --> Batching["Request Batching"]
ConcOpt --> Parallelism["Parallel Processing"]
ConcOpt --> Channels["Channel Optimization"]
ConcOpt --> LockFree["Lock-Free Structures"]
SIMD --> Verify["Benchmark & Verify"]
Algorithms --> Verify
CompileTime --> Verify
Allocation --> Verify
DataStructure --> Verify
Caching --> Verify
Buffering --> Verify
AsyncIO --> Verify
Batching --> Verify
Parallelism --> Verify
Channels --> Verify
LockFree --> Verify
style Start fill:#4da6ff,stroke:#0066cc,color:white
style Measure fill:#ffa64d,stroke:#cc7a30,color:white
style CPUOpt fill:#4dbb5f,stroke:#36873f,color:white
style MemOpt fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 PERFORMANCE PRINCIPLES
### Measure First, Optimize Second
```rust
// ✅ Always profile before optimizing
use std::time::Instant;
#[cfg(feature = "profiling")]
macro_rules! time_it {
($name:expr, $block:block) => {{
let start = Instant::now();
let result = $block;
let duration = start.elapsed();
tracing::info!("{} took {:?}", $name, duration);
result
}};
}
#[cfg(not(feature = "profiling"))]
macro_rules! time_it {
($name:expr, $block:block) => {
$block
};
}
// Usage
fn process_data(data: &[u8]) -> Vec<u8> {
time_it!("process_data", {
// Expensive computation here
data.iter().map(|&b| b.wrapping_mul(2)).collect()
})
}
```
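Ad-hoc timing like the macro above is useful for coarse measurements; for statistically sound numbers a benchmark harness is preferable. A minimal sketch assuming `criterion` is added as a dev-dependency and `process_data` is the function defined above:
```rust
// benches/process_data.rs
use criterion::{black_box, criterion_group, criterion_main, Criterion};
// use my_crate::process_data; // import the function under test (crate name is illustrative)

fn bench_process_data(c: &mut Criterion) {
    let data = vec![0u8; 64 * 1024];
    c.bench_function("process_data 64KiB", |b| {
        b.iter(|| process_data(black_box(&data)))
    });
}

criterion_group!(benches, bench_process_data);
criterion_main!(benches);
```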
## 🏗️ MEMORY OPTIMIZATION
### String and Allocation Management
```rust
use std::borrow::Cow;
// ✅ Use Cow for flexible string handling
pub fn process_text<'a>(input: &'a str) -> Cow<'a, str> {
if input.contains("old") {
Cow::Owned(input.replace("old", "new"))
} else {
Cow::Borrowed(input)
}
}
// ✅ Pre-allocate with known capacity
pub fn build_large_string(items: &[&str]) -> String {
let total_len = items.iter().map(|s| s.len()).sum::<usize>();
let mut result = String::with_capacity(total_len + items.len().saturating_sub(1));
for (i, item) in items.iter().enumerate() {
if i > 0 {
result.push(' ');
}
result.push_str(item);
}
result
}
// ✅ Use Vec::with_capacity for known sizes
pub fn process_numbers(count: usize) -> Vec<i32> {
let mut result = Vec::with_capacity(count);
for i in 0..count {
result.push(i as i32 * 2);
}
result
}
// ❌ Avoid repeated allocations
// fn bad_string_building(items: &[&str]) -> String {
// let mut result = String::new();
// for item in items {
// result = result + item + " "; // New allocation each time
// }
// result
// }
```
### Smart Pointer Optimization
```rust
use std::rc::Rc;
use std::sync::Arc;
// ✅ Use Rc for single-threaded shared ownership
#[derive(Debug, Clone)]
pub struct ConfigManager {
config: Rc<Config>,
}
impl ConfigManager {
pub fn new(config: Config) -> Self {
Self {
config: Rc::new(config),
}
}
// Cheap to clone - only increments reference count
pub fn get_config(&self) -> Rc<Config> {
self.config.clone()
}
}
// ✅ Use Arc for multi-threaded scenarios
#[derive(Debug, Clone)]
pub struct ThreadSafeCache {
data: Arc<DashMap<String, Vec<u8>>>,
}
// ✅ Pool expensive objects
pub struct ConnectionPool {
connections: Vec<DatabaseConnection>,
available: std::collections::VecDeque<usize>,
}
impl ConnectionPool {
pub async fn get_connection(&mut self) -> Option<PooledConnection> {
if let Some(index) = self.available.pop_front() {
Some(PooledConnection {
connection: &mut self.connections[index],
pool_index: index,
})
} else {
None
}
}
}
```
## 🔄 ITERATION OPTIMIZATION
### Iterator Patterns
```rust
// ✅ Chain iterators for efficiency
pub fn process_and_filter(data: &[i32]) -> Vec<i32> {
data.iter()
.filter(|&&x| x > 0)
.map(|&x| x * 2)
.filter(|&x| x < 1000)
.collect()
}
// ✅ Use fold for accumulation
pub fn sum_of_squares(numbers: &[i32]) -> i64 {
numbers
.iter()
.map(|&x| x as i64)
.map(|x| x * x)
.fold(0, |acc, x| acc + x)
}
// ✅ Parallel iteration with rayon
use rayon::prelude::*;
pub fn parallel_process(data: &[f64]) -> Vec<f64> {
data.par_iter()
.map(|&x| expensive_computation(x))
.collect()
}
fn expensive_computation(x: f64) -> f64 {
// CPU-intensive operation
x.powi(3) + x.powi(2) + x + 1.0
}
// ❌ Avoid collecting intermediate results
// fn inefficient_processing(data: &[i32]) -> Vec<i32> {
// let filtered: Vec<_> = data.iter().filter(|&&x| x > 0).collect();
// let mapped: Vec<_> = filtered.iter().map(|&x| x * 2).collect();
// mapped.into_iter().filter(|&x| x < 1000).collect()
// }
```
### Custom Iterator Implementation
```rust
// ✅ Implement efficient custom iterators
pub struct ChunkIterator<'a, T> {
data: &'a [T],
chunk_size: usize,
position: usize,
}
impl<'a, T> ChunkIterator<'a, T> {
pub fn new(data: &'a [T], chunk_size: usize) -> Self {
Self {
data,
chunk_size,
position: 0,
}
}
}
impl<'a, T> Iterator for ChunkIterator<'a, T> {
type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> {
if self.position >= self.data.len() {
return None;
}
let end = std::cmp::min(self.position + self.chunk_size, self.data.len());
let chunk = &self.data[self.position..end];
self.position = end;
Some(chunk)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = (self.data.len() - self.position + self.chunk_size - 1) / self.chunk_size;
(remaining, Some(remaining))
}
}
impl<'a, T> ExactSizeIterator for ChunkIterator<'a, T> {}
```
## 🧮 COMPUTATIONAL OPTIMIZATION
### Vectorization and SIMD
```rust
// ✅ Use SIMD when available
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
pub fn sum_f32_slice(values: &[f32]) -> f32 {
    #[cfg(target_arch = "x86_64")]
    {
        if is_x86_feature_detected!("avx2") {
            // SAFETY: guarded by the runtime AVX2 feature check above
            return unsafe { sum_f32_avx2(values) };
        }
    }
    values.iter().sum()
}
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn sum_f32_avx2(values: &[f32]) -> f32 {
let mut sum = _mm256_setzero_ps();
let chunks = values.chunks_exact(8);
let remainder = chunks.remainder();
for chunk in chunks {
let v = _mm256_loadu_ps(chunk.as_ptr());
sum = _mm256_add_ps(sum, v);
}
// Extract the sum from the vector
let mut result = [0.0f32; 8];
_mm256_storeu_ps(result.as_mut_ptr(), sum);
let vector_sum: f32 = result.iter().sum();
// Add remainder
vector_sum + remainder.iter().sum::<f32>()
}
```
### Lookup Tables and Memoization
```rust
use std::collections::HashMap;
// ✅ Use lookup tables for expensive computations
pub struct FibonacciCalculator {
cache: HashMap<u64, u64>,
}
impl FibonacciCalculator {
pub fn new() -> Self {
let mut cache = HashMap::new();
cache.insert(0, 0);
cache.insert(1, 1);
Self { cache }
}
pub fn fibonacci(&mut self, n: u64) -> u64 {
if let Some(&result) = self.cache.get(&n) {
return result;
}
let result = self.fibonacci(n - 1) + self.fibonacci(n - 2);
self.cache.insert(n, result);
result
}
}
// ✅ Pre-computed lookup tables
pub struct SinTable {
table: Vec<f64>,
resolution: f64,
}
impl SinTable {
pub fn new(resolution: usize) -> Self {
let table: Vec<f64> = (0..resolution)
.map(|i| {
let angle = (i as f64) * 2.0 * std::f64::consts::PI / (resolution as f64);
angle.sin()
})
.collect();
Self {
table,
resolution: resolution as f64,
}
}
pub fn sin_approx(&self, angle: f64) -> f64 {
let normalized = angle % (2.0 * std::f64::consts::PI);
let index = (normalized * self.resolution / (2.0 * std::f64::consts::PI)) as usize;
self.table[index.min(self.table.len() - 1)]
}
}
```
## 🔧 ASYNC PERFORMANCE
### Async Optimization Patterns
```rust
use tokio::task::JoinSet;
use futures::future::{join_all, try_join_all};
// ✅ Batch async operations
pub async fn fetch_user_data_batch(user_ids: &[UserId]) -> Result<Vec<User>, ServiceError> {
const BATCH_SIZE: usize = 50;
let mut results = Vec::with_capacity(user_ids.len());
for chunk in user_ids.chunks(BATCH_SIZE) {
let futures = chunk.iter().map(|&id| fetch_user_data(id));
let batch_results = try_join_all(futures).await?;
results.extend(batch_results);
}
Ok(results)
}
// ✅ Use bounded channels to prevent memory issues
pub async fn process_stream_with_backpressure() -> Result<(), ProcessingError> {
let (tx, mut rx) = tokio::sync::mpsc::channel(100); // Bounded channel
// Producer task
tokio::spawn(async move {
for i in 0..1000 {
if tx.send(i).await.is_err() {
break;
}
// Producer will block when channel is full
}
});
// Consumer task
while let Some(item) = rx.recv().await {
process_item(item).await?;
}
Ok(())
}
// ✅ Optimize async task spawning
pub async fn parallel_processing_optimized(items: Vec<ProcessingItem>) -> Vec<ProcessedResult> {
let mut join_set = JoinSet::new();
let concurrency_limit = num_cpus::get();
for chunk in items.chunks(items.len() / concurrency_limit + 1) {
let chunk = chunk.to_vec();
join_set.spawn(async move {
chunk.into_iter().map(process_item_sync).collect::<Vec<_>>()
});
}
let mut results = Vec::new();
while let Some(result) = join_set.join_next().await {
if let Ok(chunk_results) = result {
results.extend(chunk_results);
}
}
results
}
```
## 📊 PROFILING AND BENCHMARKING
### Benchmarking with Criterion
```rust
// benches/string_concat.rs — Criterion benches live under benches/ and need a
// `[[bench]]` entry with `harness = false` in Cargo.toml, plus:
// [dev-dependencies]
// criterion = { version = "0.5", features = ["html_reports"] }
use criterion::{black_box, criterion_group, criterion_main, Criterion};
fn bench_string_concatenation(c: &mut Criterion) {
let data = vec!["hello"; 1000];
c.bench_function("string_concat_push", |b| {
b.iter(|| {
let mut result = String::new();
for s in &data {
result.push_str(black_box(s));
}
result
})
});
c.bench_function("string_concat_join", |b| {
b.iter(|| data.join(""))
});
c.bench_function("string_concat_capacity", |b| {
b.iter(|| {
let mut result = String::with_capacity(data.len() * 5);
for s in &data {
result.push_str(black_box(s));
}
result
})
});
}
criterion_group!(benches, bench_string_concatenation);
criterion_main!(benches);
```
### Memory Profiling
```rust
// Use instruments on macOS or valgrind on Linux
#[cfg(feature = "profiling")]
pub fn memory_intensive_operation() {
// Add memory tracking
let start_memory = get_memory_usage();
// Your operation here
let result = expensive_operation();
let end_memory = get_memory_usage();
println!("Memory used: {} bytes", end_memory - start_memory);
}
#[cfg(feature = "profiling")]
fn get_memory_usage() -> usize {
// Platform-specific memory usage detection
#[cfg(target_os = "linux")]
{
use std::fs;
if let Ok(contents) = fs::read_to_string("/proc/self/status") {
for line in contents.lines() {
if line.starts_with("VmRSS:") {
if let Some(kb) = line.split_whitespace().nth(1) {
return kb.parse::<usize>().unwrap_or(0) * 1024;
}
}
}
}
}
0
}
```
## 🚨 PERFORMANCE ANTI-PATTERNS
### What to Avoid
```rust
// ❌ Don't clone unnecessarily
// fn bad_function(data: Vec<String>) -> Vec<String> {
// data.clone() // Unnecessary clone
// }
// ✅ Take ownership or borrow
fn good_function(data: Vec<String>) -> Vec<String> {
data // Move ownership
}
// ❌ Don't use Vec when you need Set operations
// fn slow_contains(vec: &Vec<String>, item: &str) -> bool {
// vec.iter().any(|s| s == item) // O(n) lookup
// }
// ✅ Use appropriate data structures
use std::collections::HashSet;
fn fast_contains(set: &HashSet<String>, item: &str) -> bool {
set.contains(item) // O(1) lookup
}
// ❌ Don't collect unnecessarily
// fn wasteful_processing(data: &[i32]) -> i32 {
// data.iter()
// .filter(|&&x| x > 0)
// .collect::<Vec<_>>() // Unnecessary allocation
// .iter()
// .sum()
// }
// ✅ Chain operations
fn efficient_processing(data: &[i32]) -> i32 {
data.iter()
.filter(|&&x| x > 0)
.sum()
}
```
## 🎯 COMPILE-TIME OPTIMIZATION
### Cargo.toml Optimizations
```toml
[profile.release]
lto = true # Link-time optimization
codegen-units = 1 # Better optimization at cost of compile time
panic = "abort" # Smaller binary size
strip = true # Remove debug symbols
[profile.release-with-debug]
inherits = "release"
debug = true # Keep debug info for profiling
# CPU-specific optimizations: `rustflags` is not a valid Cargo.toml profile key;
# set it in .cargo/config.toml instead:
# [build]
# rustflags = ["-C", "target-cpu=native"]
```
### Feature Gates for Performance
```rust
// Cargo.toml
// [features]
// simd = []
// parallel = ["rayon"]
#[cfg(feature = "simd")]
pub fn fast_sum(data: &[f32]) -> f32 {
sum_f32_slice(data)
}
#[cfg(not(feature = "simd"))]
pub fn fast_sum(data: &[f32]) -> f32 {
data.iter().sum()
}
#[cfg(feature = "parallel")]
pub fn parallel_map<T, U, F>(data: &[T], f: F) -> Vec<U>
where
T: Sync,
U: Send,
F: Fn(&T) -> U + Sync,
{
use rayon::prelude::*;
data.par_iter().map(f).collect()
}
#[cfg(not(feature = "parallel"))]
pub fn parallel_map<T, U, F>(data: &[T], f: F) -> Vec<U>
where
F: Fn(&T) -> U,
{
data.iter().map(f).collect()
}
```
## ✅ PERFORMANCE CHECKLIST
```markdown
### Performance Implementation Verification
- [ ] Profile before optimizing (use criterion for benchmarks)
- [ ] Pre-allocate collections with known capacity
- [ ] Use appropriate data structures (HashMap vs Vec for lookups)
- [ ] Leverage iterator chains instead of intermediate collections
- [ ] Consider parallel processing for CPU-intensive tasks
- [ ] Use Cow for flexible string handling
- [ ] Implement object pooling for expensive resources
- [ ] Use SIMD when appropriate and available
- [ ] Optimize async task spawning and batching
- [ ] Enable LTO and appropriate optimization flags
- [ ] Use bounded channels to prevent memory issues
- [ ] Implement memoization for expensive computations
- [ ] Choose between Arc/Rc based on threading needs
- [ ] Avoid unnecessary clones and allocations
- [ ] Use const generics for compile-time optimizations
```
This performance guide provides practical optimization strategies while maintaining Rust's safety guarantees and zero-cost abstraction principles.

View File

@@ -0,0 +1,744 @@
---
description:
globs:
alwaysApply: false
---
# 🔐 RUST SECURITY BEST PRACTICES
> **TL;DR:** Security-focused programming patterns for Rust applications, covering input validation, cryptography, secrets management, and secure coding practices.
## 🔍 SECURITY IMPLEMENTATION STRATEGY
```mermaid
graph TD
Start["Security Assessment"] --> ThreatModel["Threat Modeling"]
ThreatModel --> InputSecurity{"Input<br>Validation?"}
ThreatModel --> AuthSecurity{"Authentication<br>Required?"}
ThreatModel --> DataSecurity{"Data<br>Protection?"}
ThreatModel --> AccessSecurity{"Access<br>Control?"}
InputSecurity -->|Yes| Validation["Input Validation"]
InputSecurity -->|No| InputDone["✓"]
AuthSecurity -->|Yes| PasswordHash["Password Hashing"]
AuthSecurity -->|No| AuthDone["✓"]
DataSecurity -->|Yes| Encryption["Data Encryption"]
DataSecurity -->|No| DataDone["✓"]
AccessSecurity -->|Yes| RBAC["Role-Based Access Control"]
AccessSecurity -->|No| AccessDone["✓"]
Validation --> PathTraversal["Path Traversal Prevention"]
PathTraversal --> SQLInjection["SQL Injection Prevention"]
PasswordHash --> Argon2["Argon2 Implementation"]
Argon2 --> JWT["JWT Token Security"]
Encryption --> SecretsManagement["Secrets Management"]
SecretsManagement --> AESGCMEncryption["AES-GCM Encryption"]
RBAC --> RateLimiting["Rate Limiting"]
RateLimiting --> Audit["Security Audit Logging"]
SQLInjection --> SecurityDone["Security Verified"]
JWT --> SecurityDone
AESGCMEncryption --> SecurityDone
Audit --> SecurityDone
InputDone --> SecurityDone
AuthDone --> SecurityDone
DataDone --> SecurityDone
AccessDone --> SecurityDone
style Start fill:#4da6ff,stroke:#0066cc,color:white
style ThreatModel fill:#ffa64d,stroke:#cc7a30,color:white
style Argon2 fill:#4dbb5f,stroke:#36873f,color:white
style SecurityDone fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 SECURITY PRINCIPLES
### Input Validation and Sanitization
```rust
use validator::{Validate, ValidationError};
use regex::Regex;
use std::collections::HashSet;
// ✅ Always validate and sanitize user input
#[derive(Debug, Clone, Validate)]
pub struct UserRegistration {
#[validate(email(message = "Invalid email format"))]
pub email: String,
#[validate(length(min = 8, max = 128, message = "Password must be 8-128 characters"))]
#[validate(custom = "validate_password_strength")]
pub password: String,
#[validate(length(min = 2, max = 50, message = "Username must be 2-50 characters"))]
#[validate(regex = "USERNAME_REGEX", message = "Username contains invalid characters")]
pub username: String,
}
lazy_static::lazy_static! {
static ref USERNAME_REGEX: Regex = Regex::new(r"^[a-zA-Z0-9_-]+$").unwrap();
static ref FORBIDDEN_PASSWORDS: HashSet<&'static str> = {
let mut set = HashSet::new();
set.insert("password");
set.insert("123456");
set.insert("admin");
set.insert("qwerty");
set
};
}
fn validate_password_strength(password: &str) -> Result<(), ValidationError> {
// Check for forbidden passwords
if FORBIDDEN_PASSWORDS.contains(&password.to_lowercase().as_str()) {
return Err(ValidationError::new("forbidden_password"));
}
// Require at least one uppercase, lowercase, digit, and special character
let has_upper = password.chars().any(|c| c.is_uppercase());
let has_lower = password.chars().any(|c| c.is_lowercase());
let has_digit = password.chars().any(|c| c.is_numeric());
let has_special = password.chars().any(|c| "!@#$%^&*()_+-=[]{}|;:,.<>?".contains(c));
if !(has_upper && has_lower && has_digit && has_special) {
return Err(ValidationError::new("weak_password"));
}
Ok(())
}
// ✅ SQL injection prevention with parameterized queries
pub async fn find_user_by_email(
pool: &sqlx::PgPool,
email: &str,
) -> Result<Option<User>, sqlx::Error> {
// ✅ Safe: Uses parameterized query
sqlx::query_as::<_, User>(
"SELECT id, email, username FROM users WHERE email = $1"
)
.bind(email)
.fetch_optional(pool)
.await
}
// ❌ NEVER: String interpolation vulnerable to SQL injection
// let query = format!("SELECT * FROM users WHERE email = '{}'", email);
```
### Path Traversal Prevention
```rust
use std::path::{Path, PathBuf};
// ✅ Safe file path handling
pub fn safe_file_access(base_dir: &Path, user_path: &str) -> Result<PathBuf, SecurityError> {
// Normalize and resolve the path
let requested_path = base_dir.join(user_path);
let canonical_path = requested_path.canonicalize()
.map_err(|_| SecurityError::InvalidPath)?;
// Ensure the canonical path is within the base directory
if !canonical_path.starts_with(base_dir) {
return Err(SecurityError::PathTraversal);
}
Ok(canonical_path)
}
// ✅ File upload with validation
pub async fn upload_file(
file_data: &[u8],
filename: &str,
upload_dir: &Path,
) -> Result<PathBuf, SecurityError> {
// Validate filename
if filename.contains("..") || filename.contains('/') || filename.contains('\\') {
return Err(SecurityError::InvalidFilename);
}
// Check file size
const MAX_FILE_SIZE: usize = 10 * 1024 * 1024; // 10MB
if file_data.len() > MAX_FILE_SIZE {
return Err(SecurityError::FileTooLarge);
}
// Validate file type by magic bytes
let file_type = detect_file_type(file_data)?;
if !is_allowed_file_type(&file_type) {
return Err(SecurityError::DisallowedFileType);
}
// Generate safe filename
let safe_filename = sanitize_filename(filename);
let file_path = upload_dir.join(safe_filename);
tokio::fs::write(&file_path, file_data).await
.map_err(|_| SecurityError::FileWriteError)?;
Ok(file_path)
}
fn sanitize_filename(filename: &str) -> String {
filename
.chars()
.filter(|c| c.is_alphanumeric() || *c == '.' || *c == '-' || *c == '_')
.collect()
}
```
## 🔑 CRYPTOGRAPHY AND HASHING
### Password Hashing with Argon2
```rust
use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
use argon2::password_hash::{rand_core::OsRng, SaltString};
// ✅ Secure password hashing with Argon2 (recommended)
pub struct PasswordService;
impl PasswordService {
pub fn hash_password(password: &str) -> Result<String, SecurityError> {
let salt = SaltString::generate(&mut OsRng);
// Use Argon2id (default) with recommended parameters
let argon2 = Argon2::default();
let password_hash = argon2
.hash_password(password.as_bytes(), &salt)
.map_err(|_| SecurityError::HashingError)?;
Ok(password_hash.to_string())
}
pub fn verify_password(password: &str, hash: &str) -> Result<bool, SecurityError> {
let parsed_hash = PasswordHash::new(hash)
.map_err(|_| SecurityError::InvalidHash)?;
let argon2 = Argon2::default();
Ok(argon2.verify_password(password.as_bytes(), &parsed_hash).is_ok())
}
// ✅ Custom Argon2 configuration for high-security applications
pub fn hash_password_high_security(password: &str) -> Result<String, SecurityError> {
use argon2::{Algorithm, Params, Version};
let salt = SaltString::generate(&mut OsRng);
// Custom parameters for higher security (adjust based on performance requirements)
let params = Params::new(
65536, // m_cost (memory cost) - 64 MB
3, // t_cost (time cost) - 3 iterations
4, // p_cost (parallelism) - 4 threads
Some(32) // output length
).map_err(|_| SecurityError::HashingError)?;
let argon2 = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);
let password_hash = argon2
.hash_password(password.as_bytes(), &salt)
.map_err(|_| SecurityError::HashingError)?;
Ok(password_hash.to_string())
}
}
```
### JWT Token Security
```rust
use jsonwebtoken::{encode, decode, Header, Algorithm, Validation, EncodingKey, DecodingKey};
use serde::{Deserialize, Serialize};
use chrono::{DateTime, Utc, Duration};
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
pub sub: String, // Subject (user ID)
pub exp: i64, // Expiration time
pub iat: i64, // Issued at
pub jti: String, // JWT ID for revocation
pub scope: Vec<String>, // User permissions
}
pub struct JwtService {
encoding_key: EncodingKey,
decoding_key: DecodingKey,
algorithm: Algorithm,
}
impl JwtService {
pub fn new(secret: &[u8]) -> Self {
Self {
encoding_key: EncodingKey::from_secret(secret),
decoding_key: DecodingKey::from_secret(secret),
algorithm: Algorithm::HS256,
}
}
pub fn create_token(&self, user_id: &str, scopes: Vec<String>) -> Result<String, SecurityError> {
let now = Utc::now();
let expiration = now + Duration::hours(24);
let claims = Claims {
sub: user_id.to_string(),
exp: expiration.timestamp(),
iat: now.timestamp(),
jti: uuid::Uuid::new_v4().to_string(),
scope: scopes,
};
let mut header = Header::new(self.algorithm);
header.kid = Some("1".to_string()); // Key ID for key rotation
encode(&header, &claims, &self.encoding_key)
.map_err(|_| SecurityError::TokenCreationError)
}
pub fn verify_token(&self, token: &str) -> Result<Claims, SecurityError> {
let mut validation = Validation::new(self.algorithm);
validation.validate_exp = true;
validation.validate_nbf = true;
let token_data = decode::<Claims>(token, &self.decoding_key, &validation)
.map_err(|_| SecurityError::InvalidToken)?;
Ok(token_data.claims)
}
}
```
## 🔒 SECRETS MANAGEMENT
### Environment Variable Security
```rust
use std::env;
use chrono::Duration;
use zeroize::Zeroize;
// ✅ Secure secret handling
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct Secret {
value: String,
}
impl Secret {
pub fn from_env(key: &str) -> Result<Self, SecurityError> {
let value = env::var(key)
.map_err(|_| SecurityError::MissingSecret)?;
if value.is_empty() {
return Err(SecurityError::EmptySecret);
}
Ok(Self { value })
}
pub fn as_bytes(&self) -> &[u8] {
self.value.as_bytes()
}
// ❌ Never implement Display or Debug for secrets
// This prevents accidental logging
}
// ✅ Configuration with secure defaults
// No #[derive(Debug)]: Secret deliberately lacks Debug, so deriving it here would not compile (and could leak secrets in logs)
pub struct SecurityConfig {
pub jwt_secret: Secret,
pub database_url: Secret,
pub encryption_key: Secret,
pub session_timeout: Duration,
pub max_login_attempts: u32,
pub rate_limit_per_minute: u32,
}
impl SecurityConfig {
pub fn from_env() -> Result<Self, SecurityError> {
Ok(Self {
jwt_secret: Secret::from_env("JWT_SECRET")?,
database_url: Secret::from_env("DATABASE_URL")?,
encryption_key: Secret::from_env("ENCRYPTION_KEY")?,
session_timeout: Duration::minutes(
env::var("SESSION_TIMEOUT_MINUTES")
.unwrap_or_else(|_| "30".to_string())
.parse()
.unwrap_or(30)
),
max_login_attempts: env::var("MAX_LOGIN_ATTEMPTS")
.unwrap_or_else(|_| "5".to_string())
.parse()
.unwrap_or(5),
rate_limit_per_minute: env::var("RATE_LIMIT_PER_MINUTE")
.unwrap_or_else(|_| "60".to_string())
.parse()
.unwrap_or(60),
})
}
}
```
### Data Encryption
```rust
use aes_gcm::{Aes256Gcm, Key, Nonce, aead::{Aead, NewAead}};
use rand::{RngCore, rngs::OsRng};
pub struct EncryptionService {
cipher: Aes256Gcm,
}
impl EncryptionService {
pub fn new(key: &[u8]) -> Result<Self, SecurityError> {
if key.len() != 32 {
return Err(SecurityError::InvalidKeyLength);
}
let key = Key::from_slice(key);
let cipher = Aes256Gcm::new(key);
Ok(Self { cipher })
}
pub fn encrypt(&self, plaintext: &[u8]) -> Result<Vec<u8>, SecurityError> {
let mut nonce_bytes = [0u8; 12];
OsRng.fill_bytes(&mut nonce_bytes);
let nonce = Nonce::from_slice(&nonce_bytes);
let mut ciphertext = self.cipher
.encrypt(nonce, plaintext)
.map_err(|_| SecurityError::EncryptionError)?;
// Prepend nonce to ciphertext
let mut result = nonce_bytes.to_vec();
result.append(&mut ciphertext);
Ok(result)
}
pub fn decrypt(&self, encrypted_data: &[u8]) -> Result<Vec<u8>, SecurityError> {
if encrypted_data.len() < 12 {
return Err(SecurityError::InvalidCiphertext);
}
let (nonce_bytes, ciphertext) = encrypted_data.split_at(12);
let nonce = Nonce::from_slice(nonce_bytes);
self.cipher
.decrypt(nonce, ciphertext)
.map_err(|_| SecurityError::DecryptionError)
}
}
// ✅ Secure data structure for sensitive information
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct SensitiveData {
data: Vec<u8>,
}
impl SensitiveData {
pub fn new(data: Vec<u8>) -> Self {
Self { data }
}
pub fn as_slice(&self) -> &[u8] {
&self.data
}
}
```
## 🛡️ ACCESS CONTROL AND AUTHORIZATION
### Role-Based Access Control (RBAC)
```rust
use std::collections::{HashMap, HashSet};
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum Permission {
UserRead,
UserWrite,
UserDelete,
AdminAccess,
SystemConfig,
}
#[derive(Debug, Clone)]
pub struct Role {
pub name: String,
pub permissions: HashSet<Permission>,
}
#[derive(Debug, Clone)]
pub struct User {
pub id: String,
pub roles: HashSet<String>,
}
pub struct AuthorizationService {
roles: HashMap<String, Role>,
user_sessions: HashMap<String, User>,
}
impl AuthorizationService {
pub fn new() -> Self {
let mut roles = HashMap::new();
// Define standard roles
roles.insert("user".to_string(), Role {
name: "user".to_string(),
permissions: [Permission::UserRead].into_iter().collect(),
});
roles.insert("admin".to_string(), Role {
name: "admin".to_string(),
permissions: [
Permission::UserRead,
Permission::UserWrite,
Permission::UserDelete,
Permission::AdminAccess,
].into_iter().collect(),
});
roles.insert("super_admin".to_string(), Role {
name: "super_admin".to_string(),
permissions: [
Permission::UserRead,
Permission::UserWrite,
Permission::UserDelete,
Permission::AdminAccess,
Permission::SystemConfig,
].into_iter().collect(),
});
Self {
roles,
user_sessions: HashMap::new(),
}
}
pub fn check_permission(&self, user_id: &str, permission: &Permission) -> bool {
if let Some(user) = self.user_sessions.get(user_id) {
for role_name in &user.roles {
if let Some(role) = self.roles.get(role_name) {
if role.permissions.contains(permission) {
return true;
}
}
}
}
false
}
pub fn require_permission(&self, user_id: &str, permission: Permission) -> Result<(), SecurityError> {
if self.check_permission(user_id, &permission) {
Ok(())
} else {
Err(SecurityError::InsufficientPermissions)
}
}
}
// ✅ Authorization middleware for web frameworks
pub async fn require_auth_middleware(
auth_service: &AuthorizationService,
user_id: &str,
required_permission: Permission,
) -> Result<(), SecurityError> {
auth_service.require_permission(user_id, required_permission)
}
```
## 🚨 RATE LIMITING AND DDOS PROTECTION
### Rate Limiting Implementation
```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
pub struct RateLimiter {
limits: RwLock<HashMap<String, RateLimit>>,
max_requests: u32,
window_duration: Duration,
}
#[derive(Debug)]
struct RateLimit {
requests: Vec<Instant>,
last_cleanup: Instant,
}
impl RateLimiter {
pub fn new(max_requests: u32, window_duration: Duration) -> Self {
Self {
limits: RwLock::new(HashMap::new()),
max_requests,
window_duration,
}
}
pub async fn check_rate_limit(&self, identifier: &str) -> Result<(), SecurityError> {
let now = Instant::now();
let mut limits = self.limits.write().await;
let rate_limit = limits.entry(identifier.to_string()).or_insert(RateLimit {
requests: Vec::new(),
last_cleanup: now,
});
// Cleanup old requests
if now.duration_since(rate_limit.last_cleanup) > self.window_duration {
rate_limit.requests.retain(|&request_time| {
now.duration_since(request_time) <= self.window_duration
});
rate_limit.last_cleanup = now;
}
// Check if limit exceeded
if rate_limit.requests.len() >= self.max_requests as usize {
return Err(SecurityError::RateLimitExceeded);
}
// Add current request
rate_limit.requests.push(now);
Ok(())
}
}
// ✅ IP-based rate limiting for web endpoints
pub async fn rate_limit_by_ip(
rate_limiter: &RateLimiter,
ip_address: &str,
) -> Result<(), SecurityError> {
rate_limiter.check_rate_limit(ip_address).await
}
```
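### Security Audit Logging
The strategy diagram above routes rate limiting into security audit logging, and the checklist below calls for it, but no example is included; here is a minimal sketch built on the `tracing` crate (the event variants and field names are illustrative, not a fixed schema).
```rust
use tracing::warn;

// ✅ Emit structured, greppable events for security-relevant actions
#[derive(Debug)]
pub enum AuditEvent<'a> {
    LoginFailed { user: &'a str, ip: &'a str },
    PermissionDenied { user: &'a str, action: &'a str },
    RateLimited { ip: &'a str },
}

pub fn audit(event: AuditEvent<'_>) {
    match event {
        AuditEvent::LoginFailed { user, ip } => {
            warn!(target: "audit", user, ip, "login failed");
        }
        AuditEvent::PermissionDenied { user, action } => {
            warn!(target: "audit", user, action, "permission denied");
        }
        AuditEvent::RateLimited { ip } => {
            warn!(target: "audit", ip, "rate limit exceeded");
        }
    }
}
```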
## 🚨 SECURITY ERROR TYPES
### Comprehensive Security Errors
```rust
#[derive(thiserror::Error, Debug)]
pub enum SecurityError {
#[error("Invalid email format")]
InvalidEmail,
#[error("Weak password")]
WeakPassword,
#[error("Path traversal attempt detected")]
PathTraversal,
#[error("Invalid file path")]
InvalidPath,
#[error("Invalid filename")]
InvalidFilename,
#[error("File too large")]
FileTooLarge,
#[error("Disallowed file type")]
DisallowedFileType,
#[error("File write error")]
FileWriteError,
#[error("Hashing error")]
HashingError,
#[error("Password verification error")]
VerificationError,
#[error("Invalid hash format")]
InvalidHash,
#[error("Token creation error")]
TokenCreationError,
#[error("Invalid token")]
InvalidToken,
#[error("Missing secret configuration")]
MissingSecret,
#[error("Empty secret value")]
EmptySecret,
#[error("Invalid encryption key length")]
InvalidKeyLength,
#[error("Encryption failed")]
EncryptionError,
#[error("Decryption failed")]
DecryptionError,
#[error("Invalid ciphertext")]
InvalidCiphertext,
#[error("Insufficient permissions")]
InsufficientPermissions,
#[error("Rate limit exceeded")]
RateLimitExceeded,
#[error("Authentication required")]
AuthenticationRequired,
#[error("Session expired")]
SessionExpired,
#[error("Account locked")]
AccountLocked,
}
impl SecurityError {
pub fn is_client_error(&self) -> bool {
matches!(
self,
Self::InvalidEmail
| Self::WeakPassword
| Self::PathTraversal
| Self::InvalidPath
| Self::InvalidFilename
| Self::FileTooLarge
| Self::DisallowedFileType
| Self::InvalidToken
| Self::InsufficientPermissions
| Self::RateLimitExceeded
| Self::AuthenticationRequired
)
}
pub fn should_log_details(&self) -> bool {
!self.is_client_error()
}
}
```
## ✅ SECURITY CHECKLIST
```markdown
### Security Implementation Verification
- [ ] All user inputs are validated and sanitized
- [ ] SQL queries use parameterized statements
- [ ] File paths are validated against traversal attacks
- [ ] File uploads are validated by type and size
- [ ] Passwords are hashed with Argon2
- [ ] JWT tokens include expiration and proper validation
- [ ] Secrets are loaded from environment variables
- [ ] Sensitive data structures implement Zeroize
- [ ] Encryption uses authenticated encryption (AES-GCM)
- [ ] Role-based access control is implemented
- [ ] Rate limiting protects against abuse
- [ ] Error messages don't leak sensitive information
- [ ] Security headers are set in HTTP responses
- [ ] Input validation happens on both client and server
- [ ] Audit logging tracks security-relevant events
- [ ] Regular security updates are applied
- [ ] Cryptographic randomness uses secure sources
- [ ] Session management includes timeout and rotation
```
This security guide provides comprehensive protection patterns while maintaining usability and performance in Rust applications.

View File

@@ -0,0 +1,590 @@
---
description:
globs:
alwaysApply: false
---
# 🔍 RUST TYPE SYSTEM BEST PRACTICES
> **TL;DR:** Leverage Rust's powerful type system for safety, performance, and expressiveness through newtype patterns, phantom types, and zero-cost abstractions.
## 🔍 TYPE SYSTEM DESIGN STRATEGY
```mermaid
graph TD
Start["Type Design"] --> DomainCheck{"Domain-Specific<br>Types Needed?"}
DomainCheck -->|Yes| NewtypePattern["Newtype Pattern"]
DomainCheck -->|No| PrimitiveCheck{"Primitive<br>Obsession?"}
NewtypePattern --> StateTracking{"State Tracking<br>Required?"}
PrimitiveCheck -->|Yes| NewtypePattern
PrimitiveCheck -->|No| TraitDesign["Trait Design"]
StateTracking -->|Yes| PhantomTypes["Phantom Types"]
StateTracking -->|No| ValidatedTypes["Validated Types"]
PhantomTypes --> CompileTimeCheck["Compile-Time Validation"]
ValidatedTypes --> RuntimeCheck["Runtime Validation"]
CompileTimeCheck --> ZeroCost["Zero-Cost Abstractions"]
RuntimeCheck --> ZeroCost
TraitDesign --> ZeroCost
ZeroCost --> ErrorModeling["Error Modeling"]
ErrorModeling --> SafetyPatterns["Safety Patterns"]
SafetyPatterns --> Performance["Performance Optimization"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style NewtypePattern fill:#4dbb5f,stroke:#36873f,color:white
style PhantomTypes fill:#ffa64d,stroke:#cc7a30,color:white
style ZeroCost fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 TYPE SAFETY PRINCIPLES
### Newtype Pattern for Domain Modeling
```rust
use derive_more::{Constructor, Display, From, Into};
use serde::{Deserialize, Serialize};
use std::fmt;
// ✅ Strong typing for domain concepts
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Constructor, Display, From, Into)]
pub struct UserId(uuid::Uuid);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Constructor, Display, From, Into)]
pub struct ProductId(uuid::Uuid);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Constructor, Display, From, Into)]
pub struct OrderId(uuid::Uuid);
// ✅ Prevents mixing up IDs at compile time
fn process_order(user_id: UserId, product_id: ProductId) -> OrderId {
// Compiler prevents: process_order(product_id, user_id)
OrderId(uuid::Uuid::new_v4())
}
// ❌ Weak typing - prone to errors
// fn process_order(user_id: String, product_id: String) -> String
```
### Validated Types with Builder Pattern
```rust
use typed_builder::TypedBuilder;
use validator::Validate;
#[derive(Debug, Clone, Serialize, Deserialize, TypedBuilder, Validate)]
#[serde(rename_all = "camelCase")]
pub struct Email {
#[validate(email)]
#[builder(setter(into))]
value: String,
}
impl Email {
pub fn new(value: impl Into<String>) -> Result<Self, validator::ValidationErrors> {
let email = Self { value: value.into() };
email.validate()?;
Ok(email)
}
pub fn as_str(&self) -> &str {
&self.value
}
}
impl fmt::Display for Email {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.value)
}
}
// ✅ Usage - compile-time guarantee of valid email
let email = Email::new("user@example.com")?;
```
### Phantom Types for Compile-Time State
```rust
use std::marker::PhantomData;
// State types
pub struct Draft;
pub struct Published;
pub struct Archived;
// Document with compile-time state tracking
#[derive(Debug, Clone)]
pub struct Document<State> {
id: DocumentId,
title: String,
content: String,
_state: PhantomData<State>,
}
impl<State> Document<State> {
pub fn id(&self) -> DocumentId {
self.id
}
pub fn title(&self) -> &str {
&self.title
}
}
impl Document<Draft> {
pub fn new(title: String, content: String) -> Self {
Self {
id: DocumentId::new(),
title,
content,
_state: PhantomData,
}
}
pub fn publish(self) -> Document<Published> {
Document {
id: self.id,
title: self.title,
content: self.content,
_state: PhantomData,
}
}
}
impl Document<Published> {
pub fn archive(self) -> Document<Archived> {
Document {
id: self.id,
title: self.title,
content: self.content,
_state: PhantomData,
}
}
pub fn content(&self) -> &str {
&self.content
}
}
impl Document<Archived> {
pub fn restore(self) -> Document<Draft> {
Document {
id: self.id,
title: self.title,
content: self.content,
_state: PhantomData,
}
}
}
// ✅ Usage - compiler prevents invalid state transitions
let draft = Document::<Draft>::new("Title".to_string(), "Content".to_string());
let published = draft.publish();
let archived = published.archive();
// Compiler error: draft.archive() - can't archive a draft
```
## 🔄 TRAIT DESIGN PATTERNS
### Trait Objects vs Generic Bounds
```rust
// ✅ Use generics for known types at compile time
pub fn process_items<T: Processable>(items: &[T]) -> Vec<T::Output> {
items.iter().map(|item| item.process()).collect()
}
// ✅ Use trait objects for runtime polymorphism
pub struct EventBus {
handlers: Vec<Box<dyn EventHandler>>,
}
impl EventBus {
pub fn register_handler(&mut self, handler: Box<dyn EventHandler>) {
self.handlers.push(handler);
}
pub fn dispatch(&self, event: &Event) {
for handler in &self.handlers {
handler.handle(event);
}
}
}
// ✅ Async trait pattern
#[async_trait::async_trait]
pub trait AsyncProcessor {
type Error;
type Output;
async fn process(&self, input: &[u8]) -> Result<Self::Output, Self::Error>;
}
```
### Associated Types vs Generic Parameters
```rust
// ✅ Use associated types for tight coupling
pub trait Iterator {
type Item; // One Item type per Iterator implementation
fn next(&mut self) -> Option<Self::Item>;
}
// ✅ Use generic parameters for flexibility
pub trait Convert<T, U> {
fn convert(&self, input: T) -> U;
}
// Example: A single type can implement multiple conversions
impl Convert<String, i32> for NumberParser {
fn convert(&self, input: String) -> i32 { /* ... */ }
}
impl Convert<String, f64> for NumberParser {
fn convert(&self, input: String) -> f64 { /* ... */ }
}
```
## 📊 ENUM DESIGN PATTERNS
### Comprehensive Error Modeling
```rust
#[derive(thiserror::Error, Debug)]
pub enum UserServiceError {
#[error("User not found: {user_id}")]
NotFound { user_id: UserId },
#[error("Email already exists: {email}")]
EmailExists { email: Email },
#[error("Database error: {source}")]
Database {
#[from]
source: sqlx::Error,
},
#[error("Validation error: {message}")]
Validation { message: String },
#[error("Permission denied: {action} requires {permission}")]
PermissionDenied {
action: String,
permission: String,
},
}
// ✅ Structured error handling with context
impl UserServiceError {
pub fn is_retryable(&self) -> bool {
matches!(self, Self::Database { .. })
}
pub fn error_code(&self) -> &'static str {
match self {
Self::NotFound { .. } => "USER_NOT_FOUND",
Self::EmailExists { .. } => "EMAIL_EXISTS",
Self::Database { .. } => "DATABASE_ERROR",
Self::Validation { .. } => "VALIDATION_ERROR",
Self::PermissionDenied { .. } => "PERMISSION_DENIED",
}
}
}
```
### State Machine with Enums
```rust
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "status", content = "data", rename_all = "camelCase")]
pub enum OrderStatus {
Pending { items: Vec<OrderItem> },
Processing { estimated_completion: DateTime<Utc> },
Shipped { tracking_number: String, carrier: String },
Delivered { delivery_time: DateTime<Utc> },
Cancelled { reason: String, refund_issued: bool },
}
impl OrderStatus {
pub fn can_cancel(&self) -> bool {
matches!(self, Self::Pending { .. } | Self::Processing { .. })
}
pub fn can_ship(&self) -> bool {
matches!(self, Self::Processing { .. })
}
pub fn is_final(&self) -> bool {
matches!(self, Self::Delivered { .. } | Self::Cancelled { .. })
}
}
// ✅ Type-safe state transitions
impl Order {
pub fn ship(mut self, tracking_number: String, carrier: String) -> Result<Self, OrderError> {
match self.status {
OrderStatus::Processing { .. } => {
self.status = OrderStatus::Shipped { tracking_number, carrier };
Ok(self)
}
_ => Err(OrderError::InvalidStateTransition {
from: self.status.clone(),
to: "Shipped".to_string(),
}),
}
}
}
```
## 🛡️ SAFETY PATTERNS
### Option and Result Combinators
```rust
// ✅ Chain operations safely
fn process_user_data(user_id: UserId) -> Result<ProcessedData, ServiceError> {
    find_user(user_id)
        .and_then(|user| user.profile.ok_or(ServiceError::MissingProfile))
        .and_then(validate_profile)
        .map(process_profile)
}
// ✅ Use combinators for cleaner code
fn get_user_email(user_id: UserId) -> Option<Email> {
find_user(user_id)
.ok()
.and_then(|user| user.email)
.filter(|email| email.is_verified())
}
// ✅ Error conversion with context
async fn create_user(request: CreateUserRequest) -> Result<User, UserServiceError> {
validate_email(&request.email)
.map_err(|e| UserServiceError::Validation { message: e.to_string() })?;
repository
.create_user(request)
.await
.map_err(UserServiceError::from)
}
```
### Custom Smart Pointers
```rust
use std::ops::{Deref, DerefMut};
// ✅ Validated wrapper that maintains invariants
#[derive(Debug)]
pub struct NonEmptyVec<T> {
inner: Vec<T>,
}
impl<T> NonEmptyVec<T> {
pub fn new(first: T) -> Self {
Self {
inner: vec![first],
}
}
pub fn try_from_vec(vec: Vec<T>) -> Result<Self, EmptyVecError> {
if vec.is_empty() {
Err(EmptyVecError)
} else {
Ok(Self { inner: vec })
}
}
pub fn push(&mut self, item: T) {
self.inner.push(item);
}
pub fn first(&self) -> &T {
// Safe to unwrap because we maintain the non-empty invariant
self.inner.first().unwrap()
}
}
impl<T> Deref for NonEmptyVec<T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
&self.inner
}
}
```
## 🎨 ZERO-COST ABSTRACTIONS
### Compile-Time Constants
```rust
// ✅ Use const generics for compile-time validation
#[derive(Debug, Clone)]
pub struct FixedArray<T, const N: usize> {
data: [T; N],
}
impl<T: Default + Copy, const N: usize> FixedArray<T, N> {
pub fn new() -> Self {
Self {
data: [T::default(); N],
}
}
}
// ✅ Type-level programming with const generics
pub struct Matrix<T, const ROWS: usize, const COLS: usize> {
data: [[T; COLS]; ROWS],
}
impl<T, const ROWS: usize, const COLS: usize> Matrix<T, ROWS, COLS> {
pub fn multiply<const OTHER_COLS: usize>(
self,
other: Matrix<T, COLS, OTHER_COLS>,
) -> Matrix<T, ROWS, OTHER_COLS>
where
T: Default + Copy + std::ops::Add<Output = T> + std::ops::Mul<Output = T>,
{
// Matrix multiplication with compile-time dimension checking
todo!()
}
}
```
### Builder with Type State
```rust
// ✅ Builder pattern with compile-time validation
pub struct ConfigBuilder<HasHost, HasPort> {
host: Option<String>,
port: Option<u16>,
timeout: Option<Duration>,
_marker: PhantomData<(HasHost, HasPort)>,
}
pub struct Missing;
pub struct Present;
impl ConfigBuilder<Missing, Missing> {
pub fn new() -> Self {
Self {
host: None,
port: None,
timeout: None,
_marker: PhantomData,
}
}
}
impl<HasPort> ConfigBuilder<Missing, HasPort> {
pub fn host(self, host: String) -> ConfigBuilder<Present, HasPort> {
ConfigBuilder {
host: Some(host),
port: self.port,
timeout: self.timeout,
_marker: PhantomData,
}
}
}
impl<HasHost> ConfigBuilder<HasHost, Missing> {
pub fn port(self, port: u16) -> ConfigBuilder<HasHost, Present> {
ConfigBuilder {
host: self.host,
port: Some(port),
timeout: self.timeout,
_marker: PhantomData,
}
}
}
impl<HasHost, HasPort> ConfigBuilder<HasHost, HasPort> {
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = Some(timeout);
self
}
}
// Only allow build when both host and port are set
impl ConfigBuilder<Present, Present> {
pub fn build(self) -> Config {
Config {
host: self.host.unwrap(),
port: self.port.unwrap(),
timeout: self.timeout.unwrap_or(Duration::from_secs(30)),
}
}
}
// ✅ Usage - compiler ensures required fields
let config = ConfigBuilder::new()
.host("localhost".to_string())
.port(8080)
.timeout(Duration::from_secs(60))
.build();
```
## 🚨 TYPE SYSTEM ANTI-PATTERNS
### What to Avoid
```rust
// ❌ Weak typing - error prone
fn calculate_discount(price: f64, percentage: f64) -> f64 {
// Could accidentally pass percentage as price
price * (percentage / 100.0)
}
// ✅ Strong typing prevents errors
#[derive(Debug, Clone, Copy)]
pub struct Price(f64);
#[derive(Debug, Clone, Copy)]
pub struct Percentage(f64);
fn calculate_discount(price: Price, percentage: Percentage) -> Price {
Price(price.0 * (percentage.0 / 100.0))
}
// ❌ Overuse of String for everything
// struct User {
// id: String,
// email: String,
// status: String,
// }
// ✅ Proper typing
struct User {
id: UserId,
email: Email,
status: UserStatus,
}
// ❌ Large enums with mixed concerns
// enum AppState {
// Loading,
// UserData(User),
// Error(String),
// DatabaseConnection(Database),
// HttpRequest(Request),
// }
// ✅ Focused enums
enum LoadingState {
Loading,
Loaded(User),
Failed(LoadError),
}
```
## ✅ TYPE SYSTEM CHECKLIST
```markdown
### Type System Implementation Verification
- [ ] Uses newtype pattern for domain concepts
- [ ] Phantom types for compile-time state tracking
- [ ] Associated types vs generics chosen appropriately
- [ ] Enums model state machines correctly
- [ ] Option/Result combinators used over unwrap
- [ ] Zero-cost abstractions leverage compile-time checks
- [ ] Builder patterns enforce required fields
- [ ] Error types are structured and informative
- [ ] No primitive obsession (avoid String/i32 for everything)
- [ ] Type safety prevents common runtime errors
- [ ] Const generics used for compile-time validation
- [ ] Trait objects vs generics chosen appropriately
```
This type system guide leverages Rust's powerful type system to catch errors at compile time and create more maintainable, expressive code.

View File

@@ -135,7 +135,9 @@ pub struct DatabaseConfig {
pub struct AuthConfig {
pub jwt_secret: String,
pub token_expiry_hours: u64,
pub bcrypt_cost: u32,
pub argon2_mem_cost: u32,
pub argon2_time_cost: u32,
pub argon2_parallelism: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
@@ -180,8 +182,14 @@ impl AppConfig {
token_expiry_hours: std::env::var("TOKEN_EXPIRY_HOURS")
.unwrap_or_else(|_| "24".to_string())
.parse()?,
bcrypt_cost: std::env::var("BCRYPT_COST")
.unwrap_or_else(|_| "12".to_string())
argon2_mem_cost: std::env::var("ARGON2_MEM_COST")
.unwrap_or_else(|_| "65536".to_string())
.parse()?,
argon2_time_cost: std::env::var("ARGON2_TIME_COST")
.unwrap_or_else(|_| "3".to_string())
.parse()?,
argon2_parallelism: std::env::var("ARGON2_PARALLELISM")
.unwrap_or_else(|_| "4".to_string())
.parse()?,
},
features: FeatureFlags {
@@ -218,7 +226,9 @@ impl Default for AppConfig {
auth: AuthConfig {
jwt_secret: "development-secret".to_string(),
token_expiry_hours: 24,
bcrypt_cost: 12,
argon2_mem_cost: 65536,
argon2_time_cost: 3,
argon2_parallelism: 4,
},
features: FeatureFlags {
enable_registration: true,

View File

@@ -7,6 +7,40 @@ alwaysApply: false
> **TL;DR:** Modern CLI application patterns using clap 4.0+ with derive features, subcommands, enum_dispatch, and production-ready command execution architecture.
## 🔍 CLI APPLICATION DESIGN STRATEGY
```mermaid
graph TD
Start["CLI Application"] --> CLIType{"CLI<br>Complexity?"}
CLIType -->|Simple| SimpleCLI["Single Command CLI"]
CLIType -->|Complex| ComplexCLI["Multi-Command CLI"]
SimpleCLI --> DirectExecution["Direct Execution"]
ComplexCLI --> SubcommandArch["Subcommand Architecture"]
SubcommandArch --> EnumDispatch["enum_dispatch Pattern"]
EnumDispatch --> TraitExecution["CommandExecutor Trait"]
DirectExecution --> ErrorHandling["Error Handling"]
TraitExecution --> ErrorHandling
ErrorHandling --> UserFeedback["User Feedback"]
UserFeedback --> ProgressIndicators["Progress Indicators"]
ProgressIndicators --> Configuration["Configuration Management"]
Configuration --> Testing["CLI Testing"]
Testing --> Documentation["Help & Documentation"]
Documentation --> Completion["Shell Completion"]
Completion --> Production["Production CLI"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style SimpleCLI fill:#4dbb5f,stroke:#36873f,color:white
style ComplexCLI fill:#ffa64d,stroke:#cc7a30,color:white
style EnumDispatch fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 CLI FRAMEWORK REQUIREMENTS
### Clap 4.0+ Configuration
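The configuration itself is not part of this hunk; as a rough, illustrative sketch of the derive-based subcommand setup the TL;DR describes (the binary name, commands, and flags are placeholders):
```rust
use clap::{Parser, Subcommand};

#[derive(Debug, Parser)]
#[command(name = "app", version, about = "Example CLI")]
struct Cli {
    #[command(subcommand)]
    command: Command,
}

#[derive(Debug, Subcommand)]
enum Command {
    /// Start the server
    Serve {
        #[arg(long, default_value_t = 8080)]
        port: u16,
    },
    /// Run database migrations
    Migrate,
}

fn main() {
    let cli = Cli::parse();
    match cli.command {
        Command::Serve { port } => println!("serving on port {port}"),
        Command::Migrate => println!("running migrations"),
    }
}
```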

View File

@@ -7,6 +7,42 @@ alwaysApply: false
> **TL;DR:** Modern async/await patterns and thread-safe data structures for high-performance Rust applications.
## 🔍 CONCURRENCY ARCHITECTURE STRATEGY
```mermaid
graph TD
Start["Concurrency Requirements"] --> ConcurrencyType{"Concurrency<br>Pattern?"}
ConcurrencyType -->|Async I/O| AsyncPattern["Async/Await Pattern"]
ConcurrencyType -->|CPU Intensive| ParallelPattern["Parallel Processing"]
ConcurrencyType -->|Shared State| SharedStatePattern["Shared State Management"]
ConcurrencyType -->|Message Passing| MessagePattern["Message Passing"]
AsyncPattern --> TokioRuntime["Tokio Runtime"]
ParallelPattern --> Rayon["Rayon Parallel Iterators"]
SharedStatePattern --> DashMap["DashMap Collections"]
MessagePattern --> Channels["Channel Communication"]
TokioRuntime --> AsyncPrimitives["Async Sync Primitives"]
Rayon --> ThreadPool["Thread Pool Management"]
DashMap --> LockFree["Lock-Free Data Structures"]
Channels --> ChannelTypes["Channel Type Selection"]
AsyncPrimitives --> ErrorHandling["Error Handling"]
ThreadPool --> ErrorHandling
LockFree --> ErrorHandling
ChannelTypes --> ErrorHandling
ErrorHandling --> Testing["Concurrency Testing"]
Testing --> Performance["Performance Monitoring"]
Performance --> Production["Production Concurrency"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style AsyncPattern fill:#4dbb5f,stroke:#36873f,color:white
style SharedStatePattern fill:#ffa64d,stroke:#cc7a30,color:white
style MessagePattern fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 ASYNC RUNTIME SELECTION
### Tokio as the Standard
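The section body is elided in this hunk; a minimal sketch of the runtime usage it refers to (task bodies and values are placeholders):
```rust
use std::time::Duration;
use tokio::time::sleep;

#[tokio::main]
async fn main() {
    // Spawn an independent task onto the runtime
    let worker = tokio::spawn(async {
        sleep(Duration::from_millis(50)).await;
        42u32
    });

    // Run two futures concurrently on the current task
    let (a, b) = tokio::join!(async { 1 + 1 }, async { "done" });

    let result = worker.await.expect("worker task panicked");
    println!("{a} {b} {result}");
}
```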

View File

@@ -7,6 +7,41 @@ alwaysApply: false
> **TL;DR:** Comprehensive guidelines for database access in Rust using SQLx, focusing on type safety, async patterns, and testing strategies.
## 🔍 DATABASE ARCHITECTURE STRATEGY
```mermaid
graph TD
Start["Database Integration"] --> DBChoice{"Database<br>Type?"}
DBChoice -->|PostgreSQL| Postgres["PostgreSQL with SQLx"]
DBChoice -->|SQLite| SQLite["SQLite with SQLx"]
DBChoice -->|Multiple| MultiDB["Multi-Database Support"]
Postgres --> ConnectionPool["Connection Pool Setup"]
SQLite --> ConnectionPool
MultiDB --> ConnectionPool
ConnectionPool --> Migrations["Database Migrations"]
Migrations --> EntityDesign["Entity Design"]
EntityDesign --> RepositoryPattern["Repository Pattern"]
RepositoryPattern --> QueryPatterns["Query Patterns"]
QueryPatterns --> TypeSafety["Type Safety"]
TypeSafety --> ErrorHandling["Error Handling"]
ErrorHandling --> Testing["Database Testing"]
Testing --> Transactions["Transaction Management"]
Transactions --> Performance["Performance Optimization"]
Performance --> Production["Production Database"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style ConnectionPool fill:#4dbb5f,stroke:#36873f,color:white
style RepositoryPattern fill:#ffa64d,stroke:#cc7a30,color:white
style Testing fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 DATABASE LIBRARY SELECTION
### SQLx as the Standard
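The section body is elided in this hunk; a minimal illustrative sketch of the pool plus typed-query pattern it refers to (the connection string, table, and struct are placeholders):
```rust
use sqlx::postgres::PgPoolOptions;

#[derive(Debug, sqlx::FromRow)]
struct User {
    id: i64,
    email: String,
}

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect("postgres://localhost/app_db")
        .await?;

    // Parameterized query mapped onto a typed struct
    let users: Vec<User> =
        sqlx::query_as::<_, User>("SELECT id, email FROM users WHERE email LIKE $1")
            .bind("%@example.com")
            .fetch_all(&pool)
            .await?;

    println!("found {} users", users.len());
    Ok(())
}
```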

View File

@@ -7,6 +7,37 @@ alwaysApply: false
> **TL;DR:** Modern HTTP client patterns using reqwest with proper error handling, timeouts, and security configurations.
## 🔍 HTTP CLIENT ARCHITECTURE STRATEGY
```mermaid
graph TD
Start["HTTP Client Requirements"] --> ClientType{"Client<br>Usage Pattern?"}
ClientType -->|Simple Requests| SimpleClient["Simple Request Pattern"]
ClientType -->|Complex Integration| AdvancedClient["Advanced Client Pattern"]
ClientType -->|Service Integration| ServiceClient["Service Client Pattern"]
SimpleClient --> BasicConfig["Basic Configuration"]
AdvancedClient --> BuilderPattern["Builder Pattern"]
ServiceClient --> TypedClient["Typed Client"]
BasicConfig --> ErrorHandling["Error Handling"]
BuilderPattern --> ErrorHandling
TypedClient --> ErrorHandling
ErrorHandling --> RetryLogic["Retry Logic"]
RetryLogic --> Authentication["Authentication"]
Authentication --> Monitoring["Request Monitoring"]
Monitoring --> Testing["Client Testing"]
Testing --> Production["Production HTTP Client"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style SimpleClient fill:#4dbb5f,stroke:#36873f,color:white
style AdvancedClient fill:#ffa64d,stroke:#cc7a30,color:white
style ServiceClient fill:#d94dbb,stroke:#a3378a,color:white
```
## 🔧 REQWEST CONFIGURATION
### Standard Dependencies
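The dependency list itself is elided in this hunk; as an illustrative sketch of the client pattern the TL;DR describes (the URL, response type, and enabled features such as reqwest's `json` are assumptions):
```rust
use std::time::Duration;

#[derive(Debug, serde::Deserialize)]
struct HealthResponse {
    status: String,
}

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(30))
        .user_agent("example-app/0.1")
        .build()?;

    let health: HealthResponse = client
        .get("https://api.example.com/health")
        .send()
        .await?
        .error_for_status()? // turn 4xx/5xx responses into errors
        .json()
        .await?;

    println!("upstream status: {}", health.status);
    Ok(())
}
```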

View File

@@ -7,6 +7,37 @@ alwaysApply: false
> **TL;DR:** Modern protobuf and gRPC patterns using prost/tonic 0.13+ with clean code generation, Inner data structures, MessageSanitizer trait, gRPC reflection, and simplified service implementations.
## 🔍 PROTOBUF & GRPC DESIGN STRATEGY
```mermaid
graph TD
Start["gRPC Service Design"] --> ProtoDesign["Protocol Buffer Design"]
ProtoDesign --> CodeGen["Code Generation"]
CodeGen --> DataStructures["Data Structure Design"]
DataStructures --> InnerTypes["Inner Types Pattern"]
DataStructures --> Sanitization["Message Sanitization"]
InnerTypes --> BusinessLogic["Business Logic Separation"]
Sanitization --> BusinessLogic
BusinessLogic --> ServiceImpl["Service Implementation"]
ServiceImpl --> ErrorHandling["Error Handling"]
ErrorHandling --> Testing["Service Testing"]
Testing --> Reflection["gRPC Reflection"]
Reflection --> Deployment["Service Deployment"]
Deployment --> Monitoring["Monitoring & Observability"]
Monitoring --> Production["Production gRPC Service"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style InnerTypes fill:#4dbb5f,stroke:#36873f,color:white
style Sanitization fill:#ffa64d,stroke:#cc7a30,color:white
style ServiceImpl fill:#d94dbb,stroke:#a3378a,color:white
```
## 🎯 PROTOBUF & GRPC FRAMEWORK REQUIREMENTS
### Prost/Tonic Configuration

View File

@@ -7,6 +7,39 @@ alwaysApply: false
> **TL;DR:** Essential tools and configuration patterns for modern Rust applications, focusing on logging, configuration management, and templating.
## 🔍 TOOLS & CONFIGURATION STRATEGY
```mermaid
graph TD
Start["Application Setup"] --> ConfigType{"Configuration<br>Complexity?"}
ConfigType -->|Simple| EnvVars["Environment Variables"]
ConfigType -->|Complex| YAMLConfig["YAML Configuration"]
EnvVars --> Logging["Logging Setup"]
YAMLConfig --> ConfigValidation["Configuration Validation"]
ConfigValidation --> Logging
Logging --> StructuredLogging["Structured Logging"]
StructuredLogging --> LogRotation["Log Rotation"]
LogRotation --> Templating{"Template<br>Engine Needed?"}
Templating -->|Yes| MiniJinja["MiniJinja Templates"]
Templating -->|No| DataProcessing["Data Processing"]
MiniJinja --> DataProcessing
DataProcessing --> JSONPath["JSON Path Extraction"]
JSONPath --> Monitoring["Application Monitoring"]
Monitoring --> Production["Production Tools"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style YAMLConfig fill:#4dbb5f,stroke:#36873f,color:white
style StructuredLogging fill:#ffa64d,stroke:#cc7a30,color:white
style MiniJinja fill:#d94dbb,stroke:#a3378a,color:white
```
## 📊 LOGGING AND OBSERVABILITY
### Tracing Ecosystem (Not env_logger)
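The section body is elided in this hunk; a minimal sketch of the tracing setup it refers to (the filter string and the instrumented function are placeholders; assumes tracing-subscriber with the `env-filter` feature):
```rust
use tracing::{info, instrument};
use tracing_subscriber::EnvFilter;

#[instrument]
fn handle_request(user_id: u64) {
    info!(user_id, "handling request");
}

fn main() {
    // Structured, leveled logging driven by RUST_LOG, falling back to "info"
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
        .init();

    handle_request(42);
}
```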

View File

@@ -7,6 +7,49 @@ alwaysApply: false
> **TL;DR:** Essential utility patterns for authentication, CLI tools, data structures, and common development tasks.
## 🔍 UTILITY LIBRARY SELECTION STRATEGY
```mermaid
graph TD
Start["Utility Requirements"] --> UtilityType{"Utility<br>Category?"}
UtilityType -->|Authentication| AuthUtils["Authentication Utilities"]
UtilityType -->|CLI Tools| CLIUtils["CLI Utilities"]
UtilityType -->|Data Structures| DataUtils["Data Structure Utilities"]
UtilityType -->|Validation| ValidationUtils["Validation Utilities"]
AuthUtils --> JWT["JWT Token Management"]
AuthUtils --> PasswordHash["Password Hashing"]
CLIUtils --> ClapCLI["Clap CLI Framework"]
CLIUtils --> ProgressBars["Progress Indicators"]
DataUtils --> TypedBuilder["TypedBuilder Pattern"]
DataUtils --> EnumDispatch["enum_dispatch"]
ValidationUtils --> SerdeValidation["Serde Validation"]
ValidationUtils --> CustomValidation["Custom Validators"]
JWT --> Security["Security Implementation"]
PasswordHash --> Security
ClapCLI --> UserInterface["User Interface"]
ProgressBars --> UserInterface
TypedBuilder --> CodeGeneration["Code Generation"]
EnumDispatch --> CodeGeneration
SerdeValidation --> DataIntegrity["Data Integrity"]
CustomValidation --> DataIntegrity
Security --> Production["Production Utilities"]
UserInterface --> Production
CodeGeneration --> Production
DataIntegrity --> Production
style Start fill:#4da6ff,stroke:#0066cc,color:white
style AuthUtils fill:#4dbb5f,stroke:#36873f,color:white
style CLIUtils fill:#ffa64d,stroke:#cc7a30,color:white
style DataUtils fill:#d94dbb,stroke:#a3378a,color:white
```
## 🔐 AUTHENTICATION AND SECURITY
### JWT with jsonwebtoken

View File

@@ -77,7 +77,12 @@ graph TD
Main --> Complexity{"Project<br>Complexity?"}
Core --> Quality["Code Quality Rules"]
Core --> Testing["Testing Standards"]
Core --> Dependencies["Dependency Management"]
Core --> Types["Type System Patterns"]
Core --> Performance["Performance Guidelines"]
Core --> Security["Security Standards"]
Core --> API["API Design Principles"]
Core --> Patterns["Design Patterns"]
Core --> ErrorHandling["Error Handling"]
Complexity -->|Simple| SimpleRules["Simple Project Rules"]
@@ -112,10 +117,15 @@ graph TD
## 📋 CORE PRINCIPLES (ALWAYS APPLIED)
1. **Code Quality**: Follow DRY/SRP principles, function size limits
2. **File Organization**: Functionality-based structure, not type-based
3. **Error Handling**: Consistent error handling patterns
4. **Testing**: Comprehensive unit test coverage
5. **Documentation**: Clear, maintainable code documentation
2. **Dependencies**: Workspace-first, security-focused dependency management
3. **Type System**: Leverage newtype patterns, phantom types, and zero-cost abstractions
4. **Performance**: Profile-driven optimization, memory-efficient patterns
5. **Security**: Input validation, secure secrets management, authorization
6. **API Design**: Ergonomic interfaces, builder patterns, comprehensive documentation
7. **File Organization**: Functionality-based structure, not type-based
8. **Error Handling**: Consistent error handling patterns with structured errors
9. **Testing**: Comprehensive unit test coverage with mocking strategies
10. **Documentation**: Clear, maintainable code documentation with examples
## 🚀 PROJECT INITIALIZATION WORKFLOW
@@ -251,6 +261,12 @@ Based on project analysis, load specific rule sets:
| Module | File | Description |
|--------|------|-------------|
| **Core** | `core/code-quality.mdc` | Rust 2024, no unsafe, production-ready code |
| **Core** | `core/dependencies.mdc` | Centralized dependency management and workspace patterns |
| **Core** | `core/type-system.mdc` | Type system mastery, newtype patterns, phantom types |
| **Core** | `core/performance.mdc` | Performance optimization, SIMD, memory management |
| **Core** | `core/security.mdc` | Security patterns, Argon2 hashing, encryption |
| **Core** | `core/api-design.mdc` | Ergonomic API design, builder patterns, trait design |
| **Core** | `core/design-patterns.mdc` | Essential design patterns, actor model, strategy |
| **Simple** | `simple/single-crate.mdc` | Single crate project structure |
| **Complex** | `complex/workspace.mdc` | Multi-crate workspace management |
| **Web** | `features/axum.mdc` | Axum 0.8 patterns, OpenAPI with utoipa |

View File

@@ -7,6 +7,45 @@ alwaysApply: false
> **TL;DR:** Guidelines for organizing simple Rust projects using a single crate structure with clean separation of concerns and maintainable file organization.
## 🔍 SINGLE CRATE DESIGN STRATEGY
```mermaid
graph TD
Start["Single Crate Project"] --> CrateType{"Crate<br>Type?"}
CrateType -->|Binary| BinaryStructure["Binary Crate Structure"]
CrateType -->|Library| LibraryStructure["Library Crate Structure"]
CrateType -->|Mixed| MixedStructure["Mixed Crate Structure"]
BinaryStructure --> MinimalMain["Minimal main.rs"]
BinaryStructure --> CoreLib["Core Logic in lib.rs"]
LibraryStructure --> PublicAPI["Public API Design"]
LibraryStructure --> ModuleOrg["Module Organization"]
MixedStructure --> BinaryEntry["Binary Entry Point"]
MixedStructure --> LibraryAPI["Library API"]
MinimalMain --> FeatureModules["Feature-Based Modules"]
CoreLib --> FeatureModules
PublicAPI --> FeatureModules
ModuleOrg --> FeatureModules
BinaryEntry --> FeatureModules
LibraryAPI --> FeatureModules
FeatureModules --> ErrorHandling["Centralized Error Handling"]
ErrorHandling --> Configuration["Configuration Management"]
Configuration --> Testing["Testing Strategy"]
Testing --> Documentation["Documentation"]
Documentation --> Production["Production Single Crate"]
style Start fill:#4da6ff,stroke:#0066cc,color:white
style BinaryStructure fill:#4dbb5f,stroke:#36873f,color:white
style LibraryStructure fill:#ffa64d,stroke:#cc7a30,color:white
style MixedStructure fill:#d94dbb,stroke:#a3378a,color:white
```
## 🏗️ PROJECT STRUCTURE OVERVIEW
```mermaid

View File

@@ -167,3 +167,5 @@ async pub fn say_hello(
4. Do not add protoc_arg.
5. Primitive types do not need sanitize_optional_xxx.
6. Follow this TypedBuilder usage: for each field, add default, default_code, setter(strip_option), setter(into), or setter(strip_option, into) as appropriate. For example, Option<String> should use `#[builder(default, setter(strip_option, into))]` (see the sketch below). Do not overuse default.
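A rough illustration of that guidance (the struct and field names are placeholders):
```rust
use typed_builder::TypedBuilder;

#[derive(Debug, TypedBuilder)]
pub struct CreateUserRequest {
    #[builder(setter(into))]
    pub name: String,
    // Option<String>: default + strip_option + into, per the rule above
    #[builder(default, setter(strip_option, into))]
    pub nickname: Option<String>,
    // Use `default` only when omitting the field is genuinely acceptable
    #[builder(default = 30)]
    pub timeout_secs: u64,
}

fn demo() -> CreateUserRequest {
    CreateUserRequest::builder()
        .name("alice")
        .nickname("al")
        .build()
}
```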
Please carefully review the @/rust rule set: check whether any content is duplicated across sections and whether the sections are orthogonal, and modify and refactor accordingly. Also, if any Rust best practices and design patterns are still missing, please add them.