Complete guide to using the Job Agent system.
pip install -r requirements.txt
cp .env.example .env
# Edit .env with your keys

Get API keys:

python cli.py init-profile

Or create programmatically:
from main import create_orchestrator
orchestrator = create_orchestrator()
profile = {
"user_id": "your_user_id",
"name": "Your Name",
"skills": ["Python", "React", "AWS"],
"experience_level": "Mid", # Entry, Mid, Senior, Lead
"preferred_roles": ["Software Engineer", "Backend Engineer"],
"preferred_locations": ["Remote", "San Francisco"],
"job_goal": "interviews", # interviews, fast placement, high pay
"resume_text": "Your complete resume text here..."
}
orchestrator.create_user_profile(profile)

Find relevant jobs:
# Discover 50 jobs
python cli.py discover --max-jobs 50
# Discover for specific user
python cli.py discover --user john_doe --max-jobs 30

Apply to discovered jobs:
# Apply with auto-mode
python cli.py apply --auto --min-score 0.7
# Apply with confirmation
python cli.py apply --min-score 0.65
# Limit applications
python cli.py apply --auto --max-apps 5 --min-score 0.8

Send follow-up messages:
# Send follow-ups
python cli.py followup
# Dry run (generate but don't send)
python cli.py followup --dry-run

Check system status:

python cli.py status

Output example:
╭─────────────────── Profile ────────────────────╮
│ │
│ User Profile │
│ │
│ Name: Alex Johnson │
│ Goal: interviews │
│ │
╰────────────────────────────────────────────────╯
Jobs
┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┓
┃ Metric ┃ Value ┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━┩
│ Total Discovered │ 50 │
│ High Quality │ 15 │
│ Average Score │ 0.72 │
└─────────────────────┴────────┘
Applications
┏━━━━━━━━━━━━━━━┳━━━━━━━━┓
┃ Metric ┃ Value ┃
┡━━━━━━━━━━━━━━━╇━━━━━━━━┩
│ Total │ 25 │
│ Pending │ 18 │
│ Responses │ 7 │
│ Response Rate │ 28.0% │
└───────────────┴────────┘
Run complete autonomous cycle:
# Full automation
python cli.py run --auto --max-jobs 50 --min-score 0.7
# Conservative mode
python cli.py run --max-jobs 30 --min-score 0.8

Check API costs:

python cli.py costs

from main import create_orchestrator
# Create orchestrator
orchestrator = create_orchestrator()
# Run discovery
results = orchestrator.run_discovery(
user_id="your_user_id",
max_jobs=50
)
print(f"Found {results['jobs_discovered']} jobs")
print(f"Top score: {results['top_job']['relevance_score']}")
# Apply to jobs
app_results = orchestrator.run_applications(
user_id="your_user_id",
min_score=0.7,
auto_mode=True
)
print(f"Submitted {app_results['applications_submitted']} applications")

from main import create_orchestrator
from core.database import get_database
from core.memory import get_memory_system
orchestrator = create_orchestrator()
db = get_database()
memory = get_memory_system()
# Get detailed job data
jobs = db.get_jobs(min_score=0.8, limit=20)
for job in jobs:
print(f"{job['title']} at {job['company']} - {job['relevance_score']}")
# Get keyword recommendations
keywords = memory.get_keyword_recommendations(
job_keywords=["python", "aws", "docker"],
top_n=10
)
print(f"High-value keywords: {keywords}")
# Get company insights
insights = memory.get_company_insights("Google")
print(f"Response rate: {insights['response_rate']}")
# Get performance summary
perf = memory.get_performance_summary()
print(f"Total applications: {perf['total_applications']}")
print(f"Response rate: {perf['response_rate']:.1%}")
# Get improvement suggestions
suggestions = memory.suggest_improvements()
for s in suggestions:
    print(f"- {s}")

from agents.discovery import create_discovery_agent
from agents.resume import create_resume_agent
from agents.application import create_application_agent
import json
# Load config
with open("config/settings.json") as f:
config = json.load(f)
# Create agents
discovery = create_discovery_agent(config)
resume_agent = create_resume_agent(config)
app_agent = create_application_agent(config)
# Custom discovery
user_profile = {...}
jobs = discovery.discover_jobs(user_profile, max_jobs=100)
# Custom filtering
high_quality = [j for j in jobs if j['relevance_score'] > 0.9]
# Custom resume for each
for job in high_quality:
custom_resume = resume_agent.customize_resume(
base_resume=user_profile['resume_text'],
job=job,
user_profile=user_profile
)
cover_letter = resume_agent.generate_cover_letter(job, user_profile)
    print(f"Customized for {job['title']} at {job['company']}")

# Add to crontab
crontab -e
# Discovery at 9 AM
0 9 * * * cd /path/to/job_agent && ./venv/bin/python cli.py discover --max-jobs 50
# Applications at 2 PM
0 14 * * * cd /path/to/job_agent && ./venv/bin/python cli.py apply --auto --min-score 0.7
# Follow-ups at 6 PM
0 18 * * * cd /path/to/job_agent && ./venv/bin/python cli.py followup

import schedule
import time
from main import create_orchestrator
orchestrator = create_orchestrator()
def morning_discovery():
print("Running morning discovery...")
orchestrator.run_discovery(max_jobs=50)
def afternoon_applications():
print("Submitting applications...")
orchestrator.run_applications(auto_mode=True, min_score=0.7)
def evening_followups():
print("Sending follow-ups...")
orchestrator.run_followups()
# Schedule tasks
schedule.every().day.at("09:00").do(morning_discovery)
schedule.every().day.at("14:00").do(afternoon_applications)
schedule.every().day.at("18:00").do(evening_followups)
# Run scheduler
while True:
schedule.run_pending()
    time.sleep(60)

# service.py
from main import create_orchestrator
import time
orchestrator = create_orchestrator()
while True:
try:
# Run full cycle every 6 hours
print("Starting autonomous cycle...")
results = orchestrator.run_full_cycle(auto_apply=True)
print(f"Jobs: {results['discovery']['jobs_discovered']}")
print(f"Applications: {results['applications']['applications_submitted']}")
# Sleep for 6 hours
time.sleep(6 * 60 * 60)
except Exception as e:
print(f"Error: {e}")
        time.sleep(60)  # Wait 1 minute on error

Edit config/settings.json:
{
"application_rules": {
"min_relevance_score": 0.65,
"max_daily_applications": 10,
"require_resume_customization": true,
"require_cover_letter": true
}
}

{
"followup_schedule": {
"first_followup_days": 3,
"second_followup_days": 7,
"max_followups": 2,
"only_for_scores_above": 0.75
}
}

{
"llm_config": {
"discovery_model": "groq",
"resume_model": "gemini",
"temperature": {
"discovery": 0.1,
"resume": 0.3,
"cover_letter": 0.4
}
}
}

Do:
- Include detailed skills list
- Quantify achievements in resume
- Specify clear job goals
- Update profile regularly
Don't:
- Use generic resume
- Leave skills vague
- Set unrealistic filters
Conservative (High Quality):
python cli.py apply --min-score 0.85 --max-apps 5

Aggressive (High Volume):

python cli.py apply --min-score 0.60 --max-apps 20

Balanced:

python cli.py apply --min-score 0.70 --max-apps 10

Daily checklist:
# Check status
python cli.py status
# Review logs
tail -f logs/agent.log
# Check costs
python cli.py costs
# Database size
du -h data/jobs.db

The system improves over time. Let it run for at least 2 weeks to:
- Learn keyword effectiveness
- Identify responsive companies
- Optimize application timing
- Improve success rates
# Check profile
from core.database import get_database
db = get_database()
profile = db.get_user_profile()
print(profile)
# Verify job sources enabled
import json
with open("config/settings.json") as f:
config = json.load(f)
print(config['job_sources'])

# Get suggestions
from core.memory import get_memory_system
memory = get_memory_system()
suggestions = memory.suggest_improvements()
for s in suggestions:
print(f"- {s}")
# Check application quality
from core.database import get_database
db = get_database()
apps = db.get_applications(status="pending")
print(f"Pending: {len(apps)}")

# Check API keys
cat .env | grep API_KEY
# Test LLM connection
python -c "from core.llm import get_llm_client; client = get_llm_client(); print('Success')"
# Check rate limits
# Edit .env
GROQ_RPM_LIMIT=20
GEMINI_RPM_LIMIT=10

from core.database import get_database
import json
db = get_database()
apps = db.get_applications()
with open("applications_export.json", "w") as f:
    json.dump(apps, f, indent=2)

# Create backup
cp data/jobs.db data/jobs_backup_$(date +%Y%m%d).db
# Restore
cp data/jobs_backup_20240101.db data/jobs.db

# WARNING: Deletes all data
rm data/jobs.db
python cli.py init-profile

- Batch Operations: Discover 50+ jobs at once, then apply in batches
- Off-Peak Hours: Run during off-peak hours for better response rates
- Rate Limits: Don't exceed daily limits to avoid being flagged
- Quality Over Quantity: Higher scores = better matches = more responses
- Consistent Schedule: Run on a predictable schedule for optimal results
- Check logs: logs/agent.log
- Review documentation: README.md, ARCHITECTURE.md
- Report issues: GitHub issues
- Community: Discord/Slack channel