National Park Service Job Postings Analysis

Author: Abigail Haddad

Published: November 3, 2025

This analysis examines federal job posting trends for the National Park Service from 2018 through the last complete month of available data.

Resources: GitHub Repository | Field Documentation | USAJobs API

Data Loading and Preparation

Code
import pandas as pd
import numpy as np
from datetime import datetime
import calendar
import json

from great_tables import GT, md

def create_standard_gt_table(data, title, subtitle="", align_left_cols=None, align_center_cols=None, col_widths=None, include_source=True):
    """Create a standardized Great Tables table with common formatting"""
    # Add USAJobs data attribution to subtitle if not already included
    if include_source and subtitle and "USAJobs" not in subtitle:
        subtitle = f"{subtitle} | USAJobs Historical Data"
    elif include_source and not subtitle:
        subtitle = "USAJobs Historical Data"
    
    # Start with basic table
    gt_table = GT(data.reset_index(drop=True))
    gt_table = gt_table.tab_header(title=title, subtitle=subtitle)
    gt_table = gt_table.tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    
    # Apply alignments
    if align_left_cols:
        gt_table = gt_table.cols_align(align="left", columns=align_left_cols)
    if align_center_cols:
        gt_table = gt_table.cols_align(align="center", columns=align_center_cols)
    
    # Apply widths
    if col_widths:
        gt_table = gt_table.cols_width(col_widths)
    
    # Apply options without width constraints
    gt_table = gt_table.tab_options(
        quarto_disable_processing=True
    )
    
    return gt_table

def get_current_datetime():
    """Get current date and time info for consistent usage"""
    current_date = datetime.now()
    return {
        'date': current_date,
        'year': current_date.year,
        'month': current_date.month,
        'formatted': current_date.strftime('%Y-%m-%d %H:%M:%S')
    }

def extract_series(job_categories):
    """Extract occupational series from JobCategories JSON field"""
    try:
        if pd.isna(job_categories):
            return 'Unknown'
        categories = json.loads(job_categories)
        if categories and len(categories) > 0 and 'series' in categories[0]:
            return categories[0]['series']
        return 'Unknown'
    except (ValueError, TypeError, KeyError, IndexError):
        return 'Unknown'
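
# Illustration (hypothetical sample value; JobCategories is a JSON-encoded list of dicts
# with a 'series' key):
#   extract_series('[{"series": "0025"}]') -> '0025'
#   extract_series(None) -> 'Unknown'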

def categorize_appointment(appt_type):
    """Categorize appointment types into Permanent, Term/Temporary, or Other"""
    if appt_type == 'Permanent':
        return 'Permanent'
    elif appt_type in ['Term', 'Temporary', 'Seasonal', 'Summer', 'Intermittent', 'Internships']:
        return 'Term/Temporary'
    else:
        return 'Other'

def load_nps_data():
    """Load and prepare National Park Service job data"""
    # Load all years from 2018 onwards
    years = range(2018, 2026)
    all_data = []
    year_counts = []
    
    for year in years:
        # Load historical data (reset df so a missing file for this year
        # can't reuse the previous year's frame during deduplication below)
        df = None
        try:
            df = pd.read_parquet(f'../../data/historical_jobs_{year}.parquet')
            year_counts.append({'Year': year, 'Jobs Loaded': f"{len(df):,}"})
            all_data.append(df)
        except FileNotFoundError:
            year_counts.append({'Year': year, 'Jobs Loaded': "No data"})
        
        # Load current data if available and deduplicate against this year's historical file
        try:
            current_df = pd.read_parquet(f'../../data/current_jobs_{year}.parquet')
            if len(current_df) > 0:
                # Deduplicate by usajobsControlNumber before combining
                existing_control_numbers = set(df['usajobsControlNumber']) if df is not None else set()
                new_current_jobs = current_df[~current_df['usajobsControlNumber'].isin(existing_control_numbers)]
                if len(new_current_jobs) > 0:
                    all_data.append(new_current_jobs)
                    year_counts[-1]['Jobs Loaded'] += f" + {len(new_current_jobs):,} current"
        except FileNotFoundError:
            pass
    
    # Create data loading summary table
    loading_summary = pd.DataFrame(year_counts)
    
    # Combine all years
    combined_df = pd.concat(all_data, ignore_index=True)
    
    # Convert dates with mixed format handling
    combined_df['positionOpenDate'] = pd.to_datetime(combined_df['positionOpenDate'], format='mixed')
    combined_df['year'] = combined_df['positionOpenDate'].dt.year
    combined_df['month'] = combined_df['positionOpenDate'].dt.month
    
    # Dynamically determine the last complete month
    # If we're on the 2nd of the month or later, consider the previous month complete
    today = datetime.now()
    if today.day >= 2:
        last_complete_year = today.year
        last_complete_month = today.month - 1
        if last_complete_month == 0:
            last_complete_month = 12
            last_complete_year = today.year - 1
    else:
        # If it's the 1st, use two months ago as the last complete month
        last_complete_year = today.year
        last_complete_month = today.month - 2
        if last_complete_month <= 0:
            last_complete_month = 12 + last_complete_month
            last_complete_year = today.year - 1
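    # Worked example: run on November 3, 2025 (the 2nd or later), this resolves to
    # October 2025 as the last complete month, matching the cutoff printed below.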
    
    # Filter to only include data through the last complete month
    combined_df = combined_df[
        (combined_df['year'] < last_complete_year) | 
        ((combined_df['year'] == last_complete_year) & (combined_df['month'] <= last_complete_month))
    ].copy()
    
    print(f"Data includes postings through {calendar.month_name[last_complete_month]} {last_complete_year}")
    
    # Filter for National Park Service
    nps_df = combined_df[combined_df['hiringAgencyName'] == 'National Park Service'].copy()
    
    # Extract occupational series and categorize appointments
    nps_df['occupational_series'] = nps_df['JobCategories'].apply(extract_series)
    nps_df['appt_category'] = nps_df['appointmentType'].apply(categorize_appointment)
    
    # Create summary stats
    loading_stats = pd.DataFrame({
        'Metric': ['Total jobs loaded', 'National Park Service jobs', 'Data coverage'],
        'Value': [
            f"{len(combined_df):,}",
            f"{len(nps_df):,}",
            f"{len(year_counts)} years (2018-{calendar.month_name[last_complete_month]} {last_complete_year})"
        ]
    })
    
    return nps_df, loading_summary, loading_stats

# Load data
nps_df, loading_summary, loading_stats = load_nps_data()

# Create filtered datasets for year-over-year comparison
# For 2025, use all available months; for historical, use the same months
max_2025_month = nps_df[nps_df['year'] == 2025]['month'].max() if len(nps_df[nps_df['year'] == 2025]) > 0 else 12
comparison_months = list(range(1, max_2025_month + 1))
month_names = [calendar.month_name[m][:3] for m in comparison_months]
comparison_period = f"{month_names[0]}-{month_names[-1]}"
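# With data through October 2025, comparison_months covers months 1-10 and comparison_period is "Jan-Oct".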

# Filter datasets to same months for fair comparison
nps_comparison_months = nps_df[nps_df['month'].isin(comparison_months)].copy()
nps_2025_comparison = nps_comparison_months[nps_comparison_months['year'] == 2025]
nps_historical_comparison = nps_comparison_months[nps_comparison_months['year'].between(2018, 2024)]

# Display data loading summary as Great Table
gt_loading_stats = (
    GT(loading_stats.reset_index(drop=True))
    .tab_header(
        title="Data Loading & Filtering Summary",
        subtitle="USAJobs Data Processing Results"
    )
    .cols_align(
        align="left",
        columns=["Metric"]
    )
    .cols_align(
        align="center",
        columns=["Value"]
    )
    .cols_width({
        "Metric": "60%",
        "Value": "40%"
    })
    .tab_options(quarto_disable_processing=True)
)
gt_loading_stats.show()

# Show appointment type categorization as Great Table
appt_breakdown = pd.DataFrame({
    'Appointment Type': nps_df['appointmentType'].value_counts().index,
    'Count': nps_df['appointmentType'].value_counts().values,
    'Category': [categorize_appointment(x) for x in nps_df['appointmentType'].value_counts().index]
})

gt_appt = (
    create_standard_gt_table(
        data=appt_breakdown,
        title="Appointment Type Categorization",
        subtitle="National Park Service Job Types (2018-2025)",
        align_left_cols=["Appointment Type", "Category"],
        align_center_cols=["Count"],
        col_widths={"Appointment Type": "45%", "Count": "20%", "Category": "35%"}
    )
    .fmt_number(columns=["Count"], sep_mark=",", decimals=0)
)
gt_appt.show()
Data includes postings through October 2025
Data Loading & Filtering Summary
USAJobs Data Processing Results
Metric Value
Total jobs loaded 2,780,160
National Park Service jobs 38,460
Data coverage 8 years (2018-October 2025)
Appointment Type Categorization
National Park Service Job Types (2018-2025) | USAJobs Historical Data
Appointment Type Count Category
Permanent 22,586 Permanent
Temporary 10,990 Term/Temporary
Term 2,108 Term/Temporary
Internships 791 Term/Temporary
Multiple 669 Other
Temporary Promotion 552 Other
Seasonal 523 Term/Temporary
Recent graduates 81 Other
Temporary promotion 77 Other
ICTAP Only 38 Other
Agency Employees Only 22 Other
Detail 17 Other
Summer 4 Term/Temporary
Intermittent 2 Term/Temporary
Source: github.com/abigailhaddad/usajobs_historical
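The historical and current parquet files for a given year can contain the same postings, so the loader keeps only current-file rows whose usajobsControlNumber has not already been loaded. A minimal sketch of that dedup step on made-up frames (only the column name and the pattern come from the loading code above):

import pandas as pd

historical = pd.DataFrame({'usajobsControlNumber': [101, 102, 103]})
current = pd.DataFrame({'usajobsControlNumber': [103, 104]})

# keep only current rows whose control number is not already in the historical frame
seen = set(historical['usajobsControlNumber'])
new_rows = current[~current['usajobsControlNumber'].isin(seen)]
combined = pd.concat([historical, new_rows], ignore_index=True)
print(combined['usajobsControlNumber'].tolist())  # [101, 102, 103, 104]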

Monthly Posting Heatmaps

Code
# Get current date to limit display
dt_info = get_current_datetime()
current_year = dt_info['year']
current_month = dt_info['month']

# Re-derive the last complete month from the filtered data (latest year and its latest month);
# this is used to mask not-yet-complete months in the visualizations
last_complete_info = nps_df['year'].max(), nps_df[nps_df['year'] == nps_df['year'].max()]['month'].max()

def should_show_month(year, month):
    last_year, last_month = last_complete_info
    if year < last_year:
        return True
    elif year == last_year:
        return month <= last_month
    else:
        return False
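
# Example: with October 2025 as the last complete month, should_show_month(2025, 10)
# returns True and should_show_month(2025, 11) returns False, so the Nov/Dec 2025
# cells render blank in the heatmaps below.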

# Constants
MONTH_LABELS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 
                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def create_heatmap_table(df_subset, title, subtitle=""):
    """Create a Great Tables heatmap-style table"""
    monthly_breakdown = df_subset.groupby(['year', 'month']).size().reset_index(name='job_count')
    monthly_pivot = monthly_breakdown.pivot(index='month', columns='year', values='job_count').fillna(0)
    
    # Mask future months
    for year in monthly_pivot.columns:
        for month in monthly_pivot.index:
            if not should_show_month(year, month):
                monthly_pivot.loc[month, year] = np.nan
    
    # Add month names
    monthly_pivot.index = MONTH_LABELS
    
    # Reset index to make month a column
    monthly_pivot_reset = monthly_pivot.reset_index()
    monthly_pivot_reset.columns.name = None
    monthly_pivot_reset = monthly_pivot_reset.rename(columns={'index': 'Month'})
    
    # Get year columns for formatting - convert to strings to ensure proper handling
    year_cols = [str(col) for col in monthly_pivot_reset.columns if str(col) != 'Month']
    
    # Create color scale values for the data
    max_val = monthly_pivot.max().max()
    
    # Rename columns to strings for Great Tables
    monthly_pivot_reset.columns = [str(col) for col in monthly_pivot_reset.columns]
    
    # Keep subtitle as-is for heatmaps (they already have repo link in footnote)
    
    gt_heatmap = (
        GT(monthly_pivot_reset)
        .tab_header(title=title, subtitle=subtitle)
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .fmt_number(columns=year_cols, decimals=0, sep_mark=",")
        .data_color(
            columns=year_cols,
            palette=["white", "orange", "darkred"],
            domain=[0, max_val],
            na_color="lightgray"
        )
        .cols_align(align="center", columns=year_cols)
        .cols_align(align="left", columns=["Month"])
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_heatmap.show()

# Create heatmap summary data
def create_appointment_summary(df):
    """Create summary of job counts by appointment category"""
    permanent_count = len(df[df['appt_category'] == 'Permanent'])
    temp_count = len(df[df['appt_category'] == 'Term/Temporary'])
    total_count = len(df)
    
    return pd.DataFrame({
        'Category': ['All NPS Positions', 'Permanent Positions', 'Term/Temporary Positions'],
        'Job Count': [f"{total_count:,}", f"{permanent_count:,}", f"{temp_count:,}"],
        'Percentage': ["100%", f"{permanent_count/total_count*100:.0f}%", f"{temp_count/total_count*100:.0f}%"]
    })

heatmap_summary = create_appointment_summary(nps_df)
gt_heatmap_summary = create_standard_gt_table(
    data=heatmap_summary,
    title="Heatmap Categories Summary",
    subtitle="National Park Service Job Distribution by Appointment Type",
    align_left_cols=["Category"],
    align_center_cols=["Job Count", "Percentage"],
    col_widths={"Category": "50%", "Job Count": "25%", "Percentage": "25%"}
)
gt_heatmap_summary.show()

# 1. All NPS jobs
create_heatmap_table(nps_df, 
                    "National Park Service - All USAJobs Postings by Month and Year",
                    "")

# 2. Permanent positions only
permanent_df = nps_df[nps_df['appt_category'] == 'Permanent']
create_heatmap_table(permanent_df, 
                    "National Park Service - Permanent USAJobs Positions",
                    "")

# 3. Term/Temporary positions only  
temp_df = nps_df[nps_df['appt_category'] == 'Term/Temporary']
create_heatmap_table(temp_df, 
                    "National Park Service - Term/Temporary USAJobs Positions",
                    "")
Heatmap Categories Summary
National Park Service Job Distribution by Appointment Type | USAJobs Historical Data
Category Job Count Percentage
All NPS Positions 38,460 100%
Permanent Positions 22,586 59%
Term/Temporary Positions 14,418 37%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - All USAJobs Postings by Month and Year
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 739 220 727 288 549 574 512 326
Feb 563 621 459 345 424 482 401 58
Mar 459 537 451 437 600 503 448 104
Apr 432 513 342 371 509 436 432 119
May 359 405 343 324 445 434 398 81
Jun 361 351 338 323 430 424 344 84
Jul 342 480 350 368 327 426 427 133
Aug 346 449 341 313 494 454 387 131
Sep 299 359 311 332 350 370 400 103
Oct 371 412 371 349 415 419 526 40
Nov 388 436 344 465 559 549 554
Dec 500 632 844 717 632 522 698
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Permanent USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 181 55 272 116 252 303 255 168
Feb 180 201 228 212 265 308 228 1
Mar 179 268 289 271 412 351 293 7
Apr 219 322 260 269 359 323 285 37
May 205 290 277 272 346 341 284 38
Jun 248 258 262 272 359 326 243 43
Jul 242 343 275 281 247 305 276 55
Aug 256 348 263 236 392 358 266 52
Sep 212 281 247 251 293 237 236 49
Oct 236 282 251 243 296 245 348 2
Nov 208 239 166 225 298 257 227
Dec 170 227 236 230 271 201 294
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Term/Temporary USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 545 162 439 161 271 258 226 150
Feb 354 400 209 124 143 157 148 57
Mar 259 258 143 141 157 141 139 97
Apr 199 171 59 83 125 93 126 80
May 134 105 43 41 73 80 96 43
Jun 99 82 57 37 59 76 87 41
Jul 87 113 63 68 65 110 130 77
Aug 75 78 60 64 82 86 99 77
Sep 74 69 39 50 44 122 152 51
Oct 124 117 108 87 107 161 170 38
Nov 169 192 166 218 235 275 305
Dec 315 396 574 468 338 310 382
Source: github.com/abigailhaddad/usajobs_historical
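To spot-check a single heatmap cell against the underlying data, a minimal sketch (assuming the nps_df frame built in the loading step above):

# permanent NPS postings opened in January 2024
jan_2024_permanent = nps_df[
    (nps_df['year'] == 2024)
    & (nps_df['month'] == 1)
    & (nps_df['appt_category'] == 'Permanent')
]
print(len(jan_2024_permanent))  # should match the Jan / 2024 cell of the Permanent heatmap (255)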

2025 vs Previous Years: What’s Being Posted Less

Code
def analyze_occupation_changes():
    """Analyze Jan-Jun 2018-2024 vs Jan-Jun 2025 occupational series changes"""
    # Load occupation mapping
    occ_mapping = pd.read_csv('../DTocc.txt')
    occ_dict = dict(zip(occ_mapping['OCC'].astype(str).str.zfill(4), occ_mapping['OCCT']))
    
    # Create summary table first
    # Calculate average for historical period
    historical_avg = len(nps_historical_comparison) / 7  # 7 years (2018-2024)
    pct_change = ((len(nps_2025_comparison) - historical_avg) / historical_avg) * 100
    summary_data = pd.DataFrame({
        'Period': [f'2025 {comparison_period} jobs', f'Historical {comparison_period} average (2018-2024)', 'Change'],
        'Count': [f"{len(nps_2025_comparison):,}", f"{historical_avg:,.0f}", f"{pct_change:.0f}%"]
    })
    
    gt_scope = create_standard_gt_table(
        data=summary_data,
        title="Analysis Scope",
        subtitle=f"{comparison_period} Comparison Only",
        align_left_cols=["Period"],
        align_center_cols=["Count"],
        col_widths={"Period": "70%", "Count": "30%"}
    )
    gt_scope.show()
    
    # Get 2025 counts by occupational series for comparison period
    occ_2025 = nps_2025_comparison['occupational_series'].value_counts()
    
    # Get historical average by occupational series for comparison period
    occ_historical = nps_historical_comparison.groupby(['year', 'occupational_series']).size().reset_index(name='count')
    occ_historical_avg = occ_historical.groupby('occupational_series')['count'].mean()
    
    # Compare and find biggest changes
    comparison_data = []
    all_series = set(occ_2025.index) | set(occ_historical_avg.index)
    
    for series in all_series:
        if pd.notna(series) and series != 'Unknown':
            count_2025 = occ_2025.get(series, 0)
            avg_historical = occ_historical_avg.get(series, 0)
            
            if avg_historical >= 2:  # Only meaningful changes
                difference = count_2025 - avg_historical
                pct_change = ((count_2025 - avg_historical) / avg_historical) * 100 if avg_historical > 0 else 0
                occ_title = occ_dict.get(series, f"Series {series}")
                comparison_data.append({
                    'Occupation': occ_title,
                    'Historical\nAvg': round(avg_historical),
                    '2025\nActual': count_2025,
                    'Change': round(difference),
                    '% Change': round(pct_change, 0)
                })
    
    # Convert to DataFrame and sort by absolute change (biggest changes first)
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df['abs_change'] = abs(comparison_df['Change'])
    comparison_df = comparison_df.sort_values('abs_change', ascending=False)
    comparison_df = comparison_df.drop('abs_change', axis=1)
    
    return comparison_df

# Analyze changes
changes_df = analyze_occupation_changes()

# Display top 10
gt_df_top10 = changes_df.head(10).reset_index(drop=True)

# Create wider table for occupation names
gt_table_top10 = (
    GT(gt_df_top10.reset_index(drop=True))
    .tab_header(
        title="National Park Service: Top 10 Occupations by Biggest Changes",
        subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
    )
    .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    .cols_align(align="left", columns=["Occupation"])
    .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
    .cols_width({
        "Occupation": "45%",
        "Historical\nAvg": "18%",
        "2025\nActual": "12%", 
        "Change": "12%",
        "% Change": "13%"
    })
    .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
    .fmt_number(columns=["2025\nActual"], decimals=0)
    .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
    .data_color(
        columns=["% Change"],
        palette=["red", "white", "green"],
        domain=[-100, 50]
    )
    .tab_options(quarto_disable_processing=True)
)

gt_table_top10.show()

# Show expandable section for all results if there are more than 10
if len(changes_df) > 10:
    from IPython.display import display, HTML
    
    # Create collapsible HTML section
    expand_html = f"""
    <details style="margin-top: 20px;">
    <summary style="cursor: pointer; font-weight: bold; padding: 10px; background-color: #f0f0f0; border-radius: 5px;">
    📋 Show all {len(changes_df)} occupations
    </summary>
    <div style="margin-top: 10px;">
    """
    
    display(HTML(expand_html))
    
    # Display all results
    gt_df_all = changes_df.reset_index(drop=True)
    
    # Create wider table for all occupations  
    gt_table_all = (
        GT(gt_df_all.reset_index(drop=True))
        .tab_header(
            title="National Park Service: All Occupational Changes",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
        )
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .cols_align(align="left", columns=["Occupation"])
        .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
        .cols_width({
            "Occupation": "45%",
            "Historical\nAvg": "18%",
            "2025\nActual": "12%", 
            "Change": "12%",
            "% Change": "13%"
        })
        .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
        .fmt_number(columns=["2025\nActual"], decimals=0)
        .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
        .data_color(
            columns=["% Change"],
            palette=["red", "white", "green"],
            domain=[-100, 50]
        )
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_table_all.show()
    
    display(HTML("</div></details>"))
Analysis Scope
Jan-Oct Comparison Only | USAJobs Historical Data
Period Count
2025 Jan-Oct jobs 1,179
Historical Jan-Oct average (2018-2024) 4,206
Change -72%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service: Top 10 Occupations by Biggest Changes
Jan-Oct 2018-2024 vs Jan-Oct 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 528 128 −400 −76%
0025-PARK RANGER 588 231 −357 −61%
0303-MISCELLANEOUS CLERK AND ASSISTANT 244 50 −194 −80%
0404-BIOLOGICAL SCIENCE TECHNICIAN 185 61 −124 −67%
0090-GUIDE 158 48 −110 −70%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 104 5 −99 −95%
1640-FACILITY OPERATIONS SERVICES 90 3 −87 −97%
0462-FORESTRY TECHNICIAN 88 1 −87 −99%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 82 7 −75 −91%
2210-INFORMATION TECHNOLOGY MANAGEMENT 71 4 −67 −94%
Source: github.com/abigailhaddad/usajobs_historical
📋 Show all 156 occupations
National Park Service: All Occupational Changes
Jan-Oct 2018-2024 vs Jan-Oct 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 528 128 −400 −76%
0025-PARK RANGER 588 231 −357 −61%
0303-MISCELLANEOUS CLERK AND ASSISTANT 244 50 −194 −80%
0404-BIOLOGICAL SCIENCE TECHNICIAN 185 61 −124 −67%
0090-GUIDE 158 48 −110 −70%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 104 5 −99 −95%
1640-FACILITY OPERATIONS SERVICES 90 3 −87 −97%
0462-FORESTRY TECHNICIAN 88 1 −87 −99%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 82 7 −75 −91%
2210-INFORMATION TECHNOLOGY MANAGEMENT 71 4 −67 −94%
0560-BUDGET ANALYSIS 70 6 −64 −91%
1101-GENERAL BUSINESS AND INDUSTRY 59 1 −58 −98%
0341-ADMINISTRATIVE OFFICER 57 1 −56 −98%
0201-HUMAN RESOURCES MANAGEMENT 56 1 −55 −98%
3502-LABORING 100 49 −51 −51%
5716-ENGINEERING EQUIPMENT OPERATING 68 18 −50 −73%
0503-FINANCIAL CLERICAL AND ASSISTANCE 92 45 −47 −51%
0193-ARCHEOLOGY 50 4 −46 −92%
1601-EQUIPMENT FACILITIES, AND SERVICES 40 0 −40 −100%
0807-LANDSCAPE ARCHITECTURE 38 0 −38 −100%
0099-GENERAL STUDENT TRAINEE 45 12 −33 −73%
0028-ENVIRONMENTAL PROTECTION SPECIALIST 33 0 −33 −100%
5703-MOTOR VEHICLE OPERATING 44 14 −30 −68%
1301-GENERAL PHYSICAL SCIENCE 30 1 −29 −97%
1102-CONTRACTING 28 0 −28 −100%
0170-HISTORY 27 1 −26 −96%
0810-CIVIL ENGINEERING 27 2 −25 −93%
0408-ECOLOGY 27 3 −24 −89%
1015-MUSEUM CURATOR 25 1 −24 −96%
0343-MANAGEMENT AND PROGRAM ANALYSIS 28 5 −23 −82%
3566-CUSTODIAL WORKING 41 19 −22 −54%
1702-EDUCATION AND TRAINING TECHNICIAN 38 17 −21 −55%
0501-FINANCIAL ADMINISTRATION AND PROGRAM 24 3 −21 −87%
0808-ARCHITECTURE 21 1 −20 −95%
0023-OUTDOOR RECREATION PLANNING 23 3 −20 −87%
1035-PUBLIC AFFAIRS 19 0 −19 −100%
1603-EQUIPMENT, FACILITIES, AND SERVICES ASSISTANCE 23 5 −18 −78%
0801-GENERAL ENGINEERING 18 0 −18 −100%
3603-MASONRY 28 11 −17 −60%
1016-MUSEUM SPECIALIST AND TECHNICIAN 36 20 −16 −44%
0203-HUMAN RESOURCES ASSISTANCE 17 1 −16 −94%
2151-DISPATCHING 61 45 −16 −26%
1084-VISUAL INFORMATION 26 11 −15 −57%
0399-ADMINISTRATION AND OFFICE SUPPORT STUDENT TRAINEE 15 0 −15 −100%
0102-SOCIAL SCIENCE AID AND TECHNICIAN 28 13 −15 −54%
5003-GARDENING 23 9 −14 −62%
1712-TRAINING INSTRUCTION 14 1 −13 −93%
4607-CARPENTRY 16 3 −13 −81%
2805-ELECTRICIAN 14 1 −13 −93%
0561-BUDGET CLERICAL AND ASSISTANCE 14 1 −13 −93%
0340-PROGRAM MANAGEMENT 20 7 −13 −65%
4742-UTILITY SYSTEMS REPAIRING-OPERATING 34 46 12 35%
4701-MISC GENERAL MAINTENANCE & OPERATIONS WORK 22 10 −12 −54%
5001-MISCELLANEOUS PLANT AND ANIMAL WORK 18 6 −12 −67%
1170-REALTY 15 3 −12 −80%
1010-EXHIBITS SPECIALIST 12 1 −11 −92%
5803-HEAVY MOBILE EQUIPMENT MECHANIC 12 1 −11 −92%
5823-AUTOMOTIVE MECHANIC 13 3 −10 −77%
5042-TREE TRIMMING AND REMOVING 12 2 −10 −83%
5705-TRACTOR OPERATING 20 10 −10 −51%
0189-RECREATION AID AND ASSISTANT 26 16 −10 −39%
0486-WILDLIFE BIOLOGY 10 0 −10 −100%
0020-COMMUNITY PLANNING 9 0 −9 −100%
3501-MISC GENERAL SERVICES AND SUPPORT WORK 10 1 −9 −90%
1109-GRANTS MANAGEMENT 10 1 −9 −90%
1311-PHYSICAL SCIENCE TECHNICIAN 13 4 −9 −69%
5406-UTILITY SYSTEMS OPERATING 9 1 −8 −89%
0391-TELECOMMUNICATIONS 10 2 −8 −79%
2005-SUPPLY CLERICAL AND TECHNICIAN 9 1 −8 −89%
0802-ENGINEERING TECHNICAL 8 0 −8 −100%
0809-CONSTRUCTION CONTROL TECHNICAL 9 1 −8 −89%
0190-GENERAL ANTHROPOLOGY 9 1 −8 −89%
1171-APPRAISING 8 0 −8 −100%
0018-SAFETY AND OCCUPATIONAL HEALTH MANAGEMENT 20 12 −8 −39%
1315-HYDROLOGY 7 0 −7 −100%
0101-SOCIAL SCIENCE 7 0 −7 −100%
1001-GENERAL ARTS AND INFORMATION 9 2 −7 −78%
0318-SECRETARY 7 0 −7 −100%
0499-BIOLOGICAL SCIENCE STUDENT TRAINEE 9 3 −6 −68%
2101-TRANSPORTATION SPECIALIST 6 0 −6 −100%
0260-EQUAL EMPLOYMENT OPPORTUNITY 6 0 −6 −100%
1701-GENERAL EDUCATION AND TRAINING 6 1 −5 −82%
1350-GEOLOGY 5 0 −5 −100%
0080-SECURITY ADMINISTRATION 7 2 −5 −71%
0081-FIRE PROTECTION AND PREVENTION 3 8 5 195%
1370-CARTOGRAPHY 5 1 −4 −79%
5786-SMALL CRAFT OPERATING 9 5 −4 −46%
0437-HORTICULTURE 4 0 −4 −100%
2181-AIRCRAFT OPERATION 5 1 −4 −81%
0430-BOTANY 4 0 −4 −100%
0830-MECHANICAL ENGINEERING 4 0 −4 −100%
0599-FINANCIAL MANAGEMENT STUDENT TRAINEE 5 1 −4 −80%
0454-RANGELAND MANAGEMENT 4 0 −4 −100%
1811-CRIMINAL INVESTIGATION 2 6 4 140%
0899-ENGINEERING AND ARCHITECTURE STUDENT TRAINEE 4 0 −4 −100%
4605-WOOD CRAFTING 3 0 −3 −100%
5306-AIR CONDITIONING EQUIPMENT MECHANIC 3 0 −3 −100%
5026-PEST CONTROLLING 3 0 −3 −100%
0510-ACCOUNTING 3 0 −3 −100%
4206-PLUMBING 4 1 −3 −76%
0856-ELECTRONICS TECHNICAL 3 0 −3 −100%
1420-ARCHIVIST 6 3 −3 −51%
1373-LAND SURVEYING 3 0 −3 −100%
5409-WATER TREATMENT PLANT OPERATING 3 0 −3 −100%
2003-SUPPLY PROGRAM MANAGEMENT 3 0 −3 −100%
0199-SOCIAL SCIENCE STUDENT TRAINEE 3 0 −3 −100%
5048-ANIMAL CARETAKING 3 0 −3 −100%
0085-SECURITY GUARD 5 2 −3 −60%
1371-CARTOGRAPHIC TECHNICIAN 9 6 −3 −31%
0346-LOGISTICS MANAGEMENT 3 1 −2 −67%
1176-BUILDING MANAGEMENT 2 0 −2 −100%
1910-QUALITY ASSURANCE 2 0 −2 −100%
1699-EQUIPMENT AND FACILITIES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
1750-INSTRUCTIONAL SYSTEMS 2 0 −2 −100%
0180-PSYCHOLOGY 2 0 −2 −100%
0855-ELECTRONICS ENGINEERING 2 0 −2 −100%
2604-ELECTRONICS MECHANIC 2 0 −2 −100%
0701-VETERINARY MEDICAL SCIENCE 2 0 −2 −100%
0150-GEOGRAPHY 2 0 −2 −100%
6907-MATERIALS HANDLER 2 0 −2 −100%
1082-WRITING AND EDITING 2 0 −2 −100%
0335-COMPUTER CLERK AND ASSISTANT 3 1 −2 −63%
0394-COMMUNICATIONS CLERICAL 2 0 −2 −100%
0803-SAFETY ENGINEERING 2 0 −2 −100%
1173-HOUSING MANAGEMENT 5 3 −2 −34%
1421-ARCHIVES TECHNICIAN 4 2 −2 −50%
1083-TECHNICAL WRITING AND EDITING 2 0 −2 −100%
2102-TRANSPORTATION CLERK AND ASSISTANT 2 0 −2 −100%
2299-INFORMATION TECHNOLOGY STUDENT TRAINEE 2 0 −2 −100%
0505-FINANCIAL MANAGEMENT 2 0 −2 −100%
1105-PURCHASING 2 0 −2 −100%
0456-WILDLAND FIRE MANAGEMENT 80 77 −2 −3%
0019-SAFETY TECHNICIAN 2 0 −2 −100%
4204-PIPEFITTING 2 0 −2 −100%
1799-EDUCATION STUDENT TRAINEE 2 0 −2 −100%
0828-CONSTRUCTION ANALYST 2 0 −2 −100%
0850-ELECTRICAL ENGINEERING 4 1 −2 −71%
1106-PROCUREMENT CLERICAL AND TECHNICIAN 2 0 −2 −100%
2810-HIGH VOLTAGE ELECTRICIAN 2 0 −2 −100%
1099-INFORMATION AND ARTS STUDENT TRAINEE 2 0 −2 −100%
0299-HUMAN RESOURCES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
2010-INVENTORY MANAGEMENT 2 0 −2 −100%
0819-ENVIRONMENTAL ENGINEERING 2 0 −2 −100%
0306-GOVERNMENT INFORMATION SPECIALIST 3 1 −2 −65%
4104-SIGN PAINTING 3 4 1 41%
0482-FISH BIOLOGY 3 2 −1 −36%
0640-HEALTH AID AND TECHNICIAN 8 9 1 9%
1316-HYDROLOGIC TECHNICIAN 4 5 1 30%
4102-PAINTING 6 5 −1 −19%
0455-RANGE TECHNICIAN 3 4 1 54%
0804-FIRE PROTECTION ENGINEERING 2 1 −1 −50%
0083-POLICE 5 5 0 3%
0326-OFFICE AUTOMATION CLERICAL AND ASSISTANCE 4 4 0 4%
2150-TRANSPORTATION OPERATIONS 2 2 0 0%
5788-DECKHAND 2 2 0 −9%
1341-METEOROLOGICAL TECHNICIAN 2 2 0 0%
Source: github.com/abigailhaddad/usajobs_historical
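The occupation titles above come from the series-to-title crosswalk file read in analyze_occupation_changes. A minimal sketch of looking up a single series (using the same ../DTocc.txt file and its OCC and OCCT columns):

occ_mapping = pd.read_csv('../DTocc.txt')
occ_dict = dict(zip(occ_mapping['OCC'].astype(str).str.zfill(4), occ_mapping['OCCT']))
print(occ_dict.get('0025', 'Series 0025'))  # displays as '0025-PARK RANGER' in the tables above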

Are There More Group Announcements in 2025?

Code
def categorize_openings(opening_val):
    """Normalize totalOpenings values: MANY/FEW/SEVERAL become title-cased labels,
    numeric values become integer strings, and anything unparseable becomes 'Other'"""
    opening_str = str(opening_val).upper()
    if opening_str in ['MANY', 'FEW', 'SEVERAL']:
        return opening_str.title()
    else:
        try:
            return str(int(float(opening_val)))
        except (ValueError, TypeError):
            return 'Other'
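
# Examples: categorize_openings('MANY') -> 'Many', categorize_openings('3') -> '3',
# categorize_openings(2.0) -> '2'; values that can't be parsed fall through to 'Other'.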

def analyze_opening_types():
    """Compare the distribution of totalOpenings categories over the comparison period, 2018-2024 vs 2025"""
    # Use pre-filtered comparison period datasets and filter for jobs with opening data
    openings_2025 = nps_2025_comparison[nps_2025_comparison['totalOpenings'].notna()].copy()
    openings_historical = nps_historical_comparison[nps_historical_comparison['totalOpenings'].notna()].copy()
    
    # Apply categorization
    openings_2025.loc[:, 'opening_category'] = openings_2025['totalOpenings'].apply(categorize_openings)
    openings_historical.loc[:, 'opening_category'] = openings_historical['totalOpenings'].apply(categorize_openings)
    
    # Get top 10 categories for each period
    hist_top10 = openings_historical['opening_category'].value_counts().head(10)
    curr_top10 = openings_2025['opening_category'].value_counts().head(10)
    
    # Calculate percentages
    hist_pcts = (hist_top10 / len(openings_historical) * 100).round(0)
    curr_pcts = (curr_top10 / len(openings_2025) * 100).round(0)
    
    # Create simple comparison table
    comparison_data = []
    all_categories = set(hist_top10.index) | set(curr_top10.index)
    
    for category in sorted(all_categories):
        hist_pct = hist_pcts.get(category, 0)
        curr_pct = curr_pcts.get(category, 0)
        comparison_data.append({
            'Total Openings': category,
            'Historical\n%': hist_pct,
            '2025\n%': curr_pct,
            'Change': round(curr_pct - hist_pct, 0)
        })
    
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df = comparison_df.sort_values('Historical\n%', ascending=False)
    
    # Display with Great Tables
    gt_openings_df = comparison_df.reset_index(drop=True)
    
    gt_openings = (
        create_standard_gt_table(
            data=gt_openings_df,
            title="National Park Service: Top 10 Total Openings Comparison",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025",
            align_left_cols=["Total Openings"],
            align_center_cols=["Historical\n%", "2025\n%", "Change"],
            col_widths={
                "Total Openings": "35%",
                "Historical\n%": "22%",
                "2025\n%": "22%",
                "Change": "21%"
            }
        )
        .fmt_number(
            columns=["Historical\n%", "2025\n%", "Change"],
            decimals=0,
            pattern="{x}%"
        )
        .data_color(
            columns=["Change"],
            palette=["red", "white", "green"],
            domain=[-25, 25]
        )
    )
    
    gt_openings.show()

# Run the analysis
analyze_opening_types()
National Park Service: Top 10 Total Openings Comparison
Jan-Oct 2018-2024 vs Jan-Oct 2025 | USAJobs Historical Data
Total Openings Historical % 2025 % Change
1 69% 50% −19%
2 10% 16% 6%
Many 7% 16% 9%
Few 5% 4% −1%
3 3% 4% 1%
4 2% 3% 1%
5 1% 1% 0%
6 1% 2% 1%
10 0% 0% 0%
12 0% 1% 1%
8 0% 1% 1%
Source: github.com/abigailhaddad/usajobs_historical

Code
from IPython.display import display, Markdown
dt_info = get_current_datetime()
display(Markdown(f"*Analysis generated on {dt_info['formatted']}*"))

Analysis generated on 2025-11-03 10:10:21