National Park Service Job Postings Analysis

Author

Abigail Haddad

Published

January 3, 2026

This analysis examines federal job posting trends for the National Park Service from 2018 through the last complete month of available data.

Resources: GitHub Repository | Field Documentation | USAJobs API
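The next section reads one parquet file per year from the usajobs_historical dataset. To inspect the raw files directly before running the full pipeline, a minimal sketch (assuming the same relative paths and column layout as the loading code below) would be:

import pandas as pd

# Peek at a single year of historical postings; the path mirrors the loading code below
sample = pd.read_parquet('../../data/historical_jobs_2024.parquet')

# Columns this analysis relies on
cols = ['usajobsControlNumber', 'positionOpenDate', 'hiringAgencyName',
        'JobCategories', 'appointmentType', 'totalOpenings']
print(sample[cols].head())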

Data Loading and Preparation

Code
import pandas as pd
import numpy as np
from datetime import datetime
import calendar
import json

from great_tables import GT, md

def create_standard_gt_table(data, title, subtitle="", align_left_cols=None, align_center_cols=None, col_widths=None, include_source=True):
    """Create a standardized Great Tables table with common formatting"""
    # Add USAJobs data attribution to subtitle if not already included
    if include_source and subtitle and "USAJobs" not in subtitle:
        subtitle = f"{subtitle} | USAJobs Historical Data"
    elif include_source and not subtitle:
        subtitle = "USAJobs Historical Data"
    
    # Start with basic table
    gt_table = GT(data.reset_index(drop=True))
    gt_table = gt_table.tab_header(title=title, subtitle=subtitle)
    gt_table = gt_table.tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    
    # Apply alignments
    if align_left_cols:
        gt_table = gt_table.cols_align(align="left", columns=align_left_cols)
    if align_center_cols:
        gt_table = gt_table.cols_align(align="center", columns=align_center_cols)
    
    # Apply widths
    if col_widths:
        gt_table = gt_table.cols_width(col_widths)
    
    # Apply options without width constraints
    gt_table = gt_table.tab_options(
        quarto_disable_processing=True
    )
    
    return gt_table

def get_current_datetime():
    """Get current date and time info for consistent usage"""
    current_date = datetime.now()
    return {
        'date': current_date,
        'year': current_date.year,
        'month': current_date.month,
        'formatted': current_date.strftime('%Y-%m-%d %H:%M:%S')
    }

def extract_series(job_categories):
    """Extract the primary occupational series code from the JobCategories JSON field"""
    try:
        if pd.isna(job_categories):
            return 'Unknown'
        categories = json.loads(job_categories)
        if categories and 'series' in categories[0]:
            return categories[0]['series']
        return 'Unknown'
    except (json.JSONDecodeError, TypeError, KeyError, IndexError):
        return 'Unknown'

def categorize_appointment(appt_type):
    """Categorize appointment types into Permanent, Term/Temporary, or Other"""
    if appt_type == 'Permanent':
        return 'Permanent'
    elif appt_type in ['Term', 'Temporary', 'Seasonal', 'Summer', 'Intermittent', 'Internships']:
        return 'Term/Temporary'
    else:
        return 'Other'

def load_nps_data():
    """Load and prepare National Park Service job data"""
    # Load all years from 2018 onwards
    years = range(2018, 2026)
    all_data = []
    year_counts = []
    
    for year in years:
        # Load historical data; reset df so a missing file doesn't leave a
        # stale DataFrame from a previous year
        df = None
        try:
            df = pd.read_parquet(f'../../data/historical_jobs_{year}.parquet')
            year_counts.append({'Year': year, 'Jobs Loaded': f"{len(df):,}"})
            all_data.append(df)
        except FileNotFoundError:
            year_counts.append({'Year': year, 'Jobs Loaded': "No data"})
        
        # Load current data if available and deduplicate against this year's historical jobs
        try:
            current_df = pd.read_parquet(f'../../data/current_jobs_{year}.parquet')
            if len(current_df) > 0:
                # Deduplicate by usajobsControlNumber before combining
                existing_control_numbers = set(df['usajobsControlNumber']) if df is not None else set()
                new_current_jobs = current_df[~current_df['usajobsControlNumber'].isin(existing_control_numbers)]
                if len(new_current_jobs) > 0:
                    all_data.append(new_current_jobs)
                    year_counts[-1]['Jobs Loaded'] += f" + {len(new_current_jobs):,} current"
        except FileNotFoundError:
            pass
    
    # Create data loading summary table
    loading_summary = pd.DataFrame(year_counts)
    
    # Combine all years
    combined_df = pd.concat(all_data, ignore_index=True)
    
    # Convert dates with mixed format handling
    combined_df['positionOpenDate'] = pd.to_datetime(combined_df['positionOpenDate'], format='mixed')
    combined_df['year'] = combined_df['positionOpenDate'].dt.year
    combined_df['month'] = combined_df['positionOpenDate'].dt.month
    
    # Dynamically determine the last complete month
    # If we're on the 2nd of the month or later, consider the previous month complete
    today = datetime.now()
    if today.day >= 2:
        last_complete_year = today.year
        last_complete_month = today.month - 1
        if last_complete_month == 0:
            last_complete_month = 12
            last_complete_year = today.year - 1
    else:
        # On the 1st, the just-ended month may not be fully ingested yet,
        # so treat two months ago as the last complete month
        last_complete_year = today.year
        last_complete_month = today.month - 2
        if last_complete_month <= 0:
            last_complete_month = 12 + last_complete_month
            last_complete_year = today.year - 1
    
    # Filter to only include data through the last complete month
    combined_df = combined_df[
        (combined_df['year'] < last_complete_year) | 
        ((combined_df['year'] == last_complete_year) & (combined_df['month'] <= last_complete_month))
    ].copy()
    
    print(f"Data includes postings through {calendar.month_name[last_complete_month]} {last_complete_year}")
    
    # Filter for National Park Service
    nps_df = combined_df[combined_df['hiringAgencyName'] == 'National Park Service'].copy()
    
    # Extract occupational series and categorize appointments
    nps_df['occupational_series'] = nps_df['JobCategories'].apply(extract_series)
    nps_df['appt_category'] = nps_df['appointmentType'].apply(categorize_appointment)
    
    # Create summary stats
    loading_stats = pd.DataFrame({
        'Metric': ['Total jobs loaded', 'National Park Service jobs', 'Data coverage'],
        'Value': [
            f"{len(combined_df):,}",
            f"{len(nps_df):,}",
            f"{len(year_counts)} years (2018-{calendar.month_name[last_complete_month]} {last_complete_year})"
        ]
    })
    
    return nps_df, loading_summary, loading_stats

# Load data
nps_df, loading_summary, loading_stats = load_nps_data()

# Create filtered datasets for year-over-year comparison
# For 2025, use all available months; for historical, use the same months
max_2025_month = nps_df[nps_df['year'] == 2025]['month'].max() if len(nps_df[nps_df['year'] == 2025]) > 0 else 12
comparison_months = list(range(1, max_2025_month + 1))
month_names = [calendar.month_name[m][:3] for m in comparison_months]
comparison_period = f"{month_names[0]}-{month_names[-1]}"

# Filter datasets to same months for fair comparison
nps_comparison_months = nps_df[nps_df['month'].isin(comparison_months)].copy()
nps_2025_comparison = nps_comparison_months[nps_comparison_months['year'] == 2025]
nps_historical_comparison = nps_comparison_months[nps_comparison_months['year'].between(2018, 2024)]

# Display data loading summary as Great Table
gt_loading_stats = (
    GT(loading_stats.reset_index(drop=True))
    .tab_header(
        title="Data Loading & Filtering Summary",
        subtitle="USAJobs Data Processing Results"
    )
    .cols_align(
        align="left",
        columns=["Metric"]
    )
    .cols_align(
        align="center",
        columns=["Value"]
    )
    .cols_width({
        "Metric": "60%",
        "Value": "40%"
    })
    .tab_options(quarto_disable_processing=True)
)
gt_loading_stats.show()

# Show appointment type categorization as Great Table
appt_counts = nps_df['appointmentType'].value_counts()
appt_breakdown = pd.DataFrame({
    'Appointment Type': appt_counts.index,
    'Count': appt_counts.values,
    'Category': [categorize_appointment(x) for x in appt_counts.index]
})

gt_appt = (
    create_standard_gt_table(
        data=appt_breakdown,
        title="Appointment Type Categorization",
        subtitle="National Park Service Job Types (2018-2025)",
        align_left_cols=["Appointment Type", "Category"],
        align_center_cols=["Count"],
        col_widths={"Appointment Type": "45%", "Count": "20%", "Category": "35%"}
    )
    .fmt_number(columns=["Count"], sep_mark=",", decimals=0)
)
gt_appt.show()
Data includes postings through December 2025
Data Loading & Filtering Summary
USAJobs Data Processing Results
Metric Value
Total jobs loaded 2,805,337
National Park Service jobs 38,481
Data coverage 8 years (2018-December 2025)
Appointment Type Categorization
National Park Service Job Types (2018-2025) | USAJobs Historical Data
Appointment Type Count Category
Permanent 22,599 Permanent
Temporary 10,997 Term/Temporary
Term 2,109 Term/Temporary
Internships 791 Term/Temporary
Multiple 669 Other
Temporary Promotion 552 Other
Seasonal 523 Term/Temporary
Recent graduates 81 Other
Temporary promotion 77 Other
ICTAP Only 38 Other
Agency Employees Only 22 Other
Detail 17 Other
Summer 4 Term/Temporary
Intermittent 2 Term/Temporary
Source: github.com/abigailhaddad/usajobs_historical
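A note on the JobCategories parsing above: extract_series assumes the field is a JSON-encoded list whose first entry carries a series code (any other keys in those objects are ignored). A quick sketch of the expected behavior, using made-up inputs:

# Hypothetical JobCategories values, illustrating how extract_series behaves
print(extract_series('[{"series": "0025"}]'))  # '0025' (Park Ranger series)
print(extract_series('[]'))                    # 'Unknown' (empty list)
print(extract_series(None))                    # 'Unknown' (missing value)
print(extract_series('not json'))              # 'Unknown' (unparseable)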

Monthly Hiring Heatmaps

Code
# Get current date to limit display
dt_info = get_current_datetime()
current_year = dt_info['year']
current_month = dt_info['month']

# Derive the last complete month from the already-filtered data (loading trimmed
# everything after it); used to mask future months in the heatmap visualizations
last_complete_info = nps_df['year'].max(), nps_df[nps_df['year'] == nps_df['year'].max()]['month'].max()

def should_show_month(year, month):
    last_year, last_month = last_complete_info
    if year < last_year:
        return True
    elif year == last_year:
        return month <= last_month
    else:
        return False

# Constants
MONTH_LABELS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 
                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def create_heatmap_table(df_subset, title, subtitle=""):
    """Create a Great Tables heatmap-style table"""
    monthly_breakdown = df_subset.groupby(['year', 'month']).size().reset_index(name='job_count')
    monthly_pivot = monthly_breakdown.pivot(index='month', columns='year', values='job_count').fillna(0)
    
    # Mask future months
    for year in monthly_pivot.columns:
        for month in monthly_pivot.index:
            if not should_show_month(year, month):
                monthly_pivot.loc[month, year] = np.nan
    
    # Add month names
    monthly_pivot.index = MONTH_LABELS
    
    # Reset index to make month a column
    monthly_pivot_reset = monthly_pivot.reset_index()
    monthly_pivot_reset.columns.name = None
    monthly_pivot_reset = monthly_pivot_reset.rename(columns={'index': 'Month'})
    
    # Get year columns for formatting - convert to strings to ensure proper handling
    year_cols = [str(col) for col in monthly_pivot_reset.columns if str(col) != 'Month']
    
    # Maximum value sets the upper bound of the color scale
    max_val = monthly_pivot.max().max()
    
    # Rename columns to strings for Great Tables
    monthly_pivot_reset.columns = [str(col) for col in monthly_pivot_reset.columns]
    
    # Keep subtitle as-is for heatmaps (they already have repo link in footnote)
    
    gt_heatmap = (
        GT(monthly_pivot_reset)
        .tab_header(title=title, subtitle=subtitle)
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .fmt_number(columns=year_cols, decimals=0, sep_mark=",")
        .data_color(
            columns=year_cols,
            palette=["white", "orange", "darkred"],
            domain=[0, max_val],
            na_color="lightgray"
        )
        .cols_align(align="center", columns=year_cols)
        .cols_align(align="left", columns=["Month"])
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_heatmap.show()

# Create heatmap summary data
def create_appointment_summary(df):
    """Create summary of job counts by appointment category"""
    permanent_count = len(df[df['appt_category'] == 'Permanent'])
    temp_count = len(df[df['appt_category'] == 'Term/Temporary'])
    total_count = len(df)
    
    return pd.DataFrame({
        'Category': ['All NPS Positions', 'Permanent Positions', 'Term/Temporary Positions'],
        'Job Count': [f"{total_count:,}", f"{permanent_count:,}", f"{temp_count:,}"],
        'Percentage': ["100%", f"{permanent_count/total_count*100:.0f}%", f"{temp_count/total_count*100:.0f}%"]
    })

heatmap_summary = create_appointment_summary(nps_df)
gt_heatmap_summary = create_standard_gt_table(
    data=heatmap_summary,
    title="Heatmap Categories Summary",
    subtitle="National Park Service Job Distribution by Appointment Type",
    align_left_cols=["Category"],
    align_center_cols=["Job Count", "Percentage"],
    col_widths={"Category": "50%", "Job Count": "25%", "Percentage": "25%"}
)
gt_heatmap_summary.show()

# 1. All NPS jobs
create_heatmap_table(nps_df, 
                    "National Park Service - All USAJobs Postings by Month and Year",
                    "")

# 2. Permanent positions only
permanent_df = nps_df[nps_df['appt_category'] == 'Permanent']
create_heatmap_table(permanent_df, 
                    "National Park Service - Permanent USAJobs Positions",
                    "")

# 3. Term/Temporary positions only  
temp_df = nps_df[nps_df['appt_category'] == 'Term/Temporary']
create_heatmap_table(temp_df, 
                    "National Park Service - Term/Temporary USAJobs Positions",
                    "")
Heatmap Categories Summary
National Park Service Job Distribution by Appointment Type | USAJobs Historical Data
Category Job Count Percentage
All NPS Positions 38,481 100%
Permanent Positions 22,599 59%
Term/Temporary Positions 14,426 37%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - All USAJobs Postings by Month and Year
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 739 220 727 288 549 574 512 326
Feb 563 621 459 345 424 482 401 58
Mar 459 537 451 437 600 503 448 104
Apr 432 513 342 371 509 436 432 119
May 359 405 343 324 445 434 398 81
Jun 361 351 338 323 430 424 344 84
Jul 342 480 350 368 327 426 427 133
Aug 346 449 341 313 494 454 387 131
Sep 299 359 311 332 350 370 400 116
Oct 371 412 371 349 415 419 526 41
Nov 388 436 344 465 559 549 554 6
Dec 500 632 844 717 632 522 698 1
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Permanent USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 181 55 272 116 252 303 255 168
Feb 180 201 228 212 265 308 228 1
Mar 179 268 289 271 412 351 293 7
Apr 219 322 260 269 359 323 285 37
May 205 290 277 272 346 341 284 38
Jun 248 258 262 272 359 326 243 43
Jul 242 343 275 281 247 305 276 55
Aug 256 348 263 236 392 358 266 52
Sep 212 281 247 251 293 237 236 60
Oct 236 282 251 243 296 245 348 2
Nov 208 239 166 225 298 257 227 2
Dec 170 227 236 230 271 201 294 0
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Term/Temporary USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 545 162 439 161 271 258 226 150
Feb 354 400 209 124 143 157 148 57
Mar 259 258 143 141 157 141 139 97
Apr 199 171 59 83 125 93 126 80
May 134 105 43 41 73 80 96 43
Jun 99 82 57 37 59 76 87 41
Jul 87 113 63 68 65 110 130 77
Aug 75 78 60 64 82 86 99 77
Sep 74 69 39 50 44 122 152 53
Oct 124 117 108 87 107 161 170 39
Nov 169 192 166 218 235 275 305 4
Dec 315 396 574 468 338 310 382 1
Source: github.com/abigailhaddad/usajobs_historical
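The heatmap code masks any month after the last complete month and renders it light gray (na_color). With December 2025 as the cutoff in this run nothing is actually masked, but a quick sanity check shows how should_show_month would behave:

# With last_complete_info == (2025, 12), as in this run:
print(should_show_month(2024, 7))   # True  (any earlier year is shown)
print(should_show_month(2025, 12))  # True  (the last complete month is shown)
print(should_show_month(2026, 1))   # False (later months would be masked as NaN)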

2025 vs Previous Years: What’s Being Posted Less

Code
def analyze_occupation_changes():
    """Analyze Jan-Jun 2018-2024 vs Jan-Jun 2025 occupational series changes"""
    # Load occupation mapping
    occ_mapping = pd.read_csv('../DTocc.txt')
    occ_dict = dict(zip(occ_mapping['OCC'].astype(str).str.zfill(4), occ_mapping['OCCT']))
    
    # Create summary table first
    # Calculate average for historical period
    historical_avg = len(nps_historical_comparison) / 7  # 7 years (2018-2024)
    pct_change = ((len(nps_2025_comparison) - historical_avg) / historical_avg) * 100
    summary_data = pd.DataFrame({
        'Period': [f'2025 {comparison_period} jobs', f'Historical {comparison_period} average (2018-2024)', 'Change'],
        'Count': [f"{len(nps_2025_comparison):,}", f"{historical_avg:,.0f}", f"{pct_change:.0f}%"]
    })
    
    gt_scope = create_standard_gt_table(
        data=summary_data,
        title="Analysis Scope",
        subtitle=f"{comparison_period} Comparison Only",
        align_left_cols=["Period"],
        align_center_cols=["Count"],
        col_widths={"Period": "70%", "Count": "30%"}
    )
    gt_scope.show()
    
    # Get 2025 counts by occupational series for comparison period
    occ_2025 = nps_2025_comparison['occupational_series'].value_counts()
    
    # Get historical average by occupational series for comparison period
    occ_historical = nps_historical_comparison.groupby(['year', 'occupational_series']).size().reset_index(name='count')
    occ_historical_avg = occ_historical.groupby('occupational_series')['count'].mean()
    
    # Compare and find biggest changes
    comparison_data = []
    all_series = set(occ_2025.index) | set(occ_historical_avg.index)
    
    for series in all_series:
        if pd.notna(series) and series != 'Unknown':
            count_2025 = occ_2025.get(series, 0)
            avg_historical = occ_historical_avg.get(series, 0)
            
            if avg_historical >= 2:  # Only include series with a meaningful historical baseline
                difference = count_2025 - avg_historical
                pct_change = (difference / avg_historical) * 100
                occ_title = occ_dict.get(series, f"Series {series}")
                comparison_data.append({
                    'Occupation': occ_title,
                    'Historical\nAvg': round(avg_historical),
                    '2025\nActual': count_2025,
                    'Change': round(difference),
                    '% Change': round(pct_change, 0)
                })
    
    # Convert to DataFrame and sort by absolute change (biggest changes first)
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df['abs_change'] = abs(comparison_df['Change'])
    comparison_df = comparison_df.sort_values('abs_change', ascending=False)
    comparison_df = comparison_df.drop('abs_change', axis=1)
    
    return comparison_df

# Analyze changes
changes_df = analyze_occupation_changes()

# Display top 10
gt_df_top10 = changes_df.head(10).reset_index(drop=True)

# Create wider table for occupation names
gt_table_top10 = (
    GT(gt_df_top10.reset_index(drop=True))
    .tab_header(
        title="National Park Service: Top 10 Occupations by Biggest Changes",
        subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
    )
    .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    .cols_align(align="left", columns=["Occupation"])
    .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
    .cols_width({
        "Occupation": "45%",
        "Historical\nAvg": "18%",
        "2025\nActual": "12%", 
        "Change": "12%",
        "% Change": "13%"
    })
    .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
    .fmt_number(columns=["2025\nActual"], decimals=0)
    .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
    .data_color(
        columns=["% Change"],
        palette=["red", "white", "green"],
        domain=[-100, 50]
    )
    .tab_options(quarto_disable_processing=True)
)

gt_table_top10.show()

# Show expandable section for all results if there are more than 10
if len(changes_df) > 10:
    from IPython.display import display, HTML
    
    # Create collapsible HTML section
    expand_html = f"""
    <details style="margin-top: 20px;">
    <summary style="cursor: pointer; font-weight: bold; padding: 10px; background-color: #f0f0f0; border-radius: 5px;">
    📋 Show all {len(changes_df)} occupations
    </summary>
    <div style="margin-top: 10px;">
    """
    
    display(HTML(expand_html))
    
    # Display all results
    gt_df_all = changes_df.reset_index(drop=True)
    
    # Create wider table for all occupations  
    gt_table_all = (
        GT(gt_df_all.reset_index(drop=True))
        .tab_header(
            title="National Park Service: All Occupational Changes",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
        )
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .cols_align(align="left", columns=["Occupation"])
        .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
        .cols_width({
            "Occupation": "45%",
            "Historical\nAvg": "18%",
            "2025\nActual": "12%", 
            "Change": "12%",
            "% Change": "13%"
        })
        .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
        .fmt_number(columns=["2025\nActual"], decimals=0)
        .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
        .data_color(
            columns=["% Change"],
            palette=["red", "white", "green"],
            domain=[-100, 50]
        )
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_table_all.show()
    
    display(HTML("</div></details>"))
Analysis Scope
Jan-Dec Comparison Only | USAJobs Historical Data
Period Count
2025 Jan-Dec jobs 1,200
Historical Jan-Dec average (2018-2024) 5,326
Change -77%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service: Top 10 Occupations by Biggest Changes
Jan-Dec 2018-2024 vs Jan-Dec 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 667 129 −538 −81%
0025-PARK RANGER 750 233 −517 −69%
0303-MISCELLANEOUS CLERK AND ASSISTANT 323 50 −273 −85%
0404-BIOLOGICAL SCIENCE TECHNICIAN 293 62 −231 −79%
0090-GUIDE 208 49 −159 −76%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 121 5 −116 −96%
0462-FORESTRY TECHNICIAN 113 1 −112 −99%
1640-FACILITY OPERATIONS SERVICES 104 3 −101 −97%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 97 7 −90 −93%
3502-LABORING 130 49 −81 −62%
Source: github.com/abigailhaddad/usajobs_historical
📋 Show all 157 occupations
National Park Service: All Occupational Changes
Jan-Dec 2018-2024 vs Jan-Dec 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 667 129 −538 −81%
0025-PARK RANGER 750 233 −517 −69%
0303-MISCELLANEOUS CLERK AND ASSISTANT 323 50 −273 −85%
0404-BIOLOGICAL SCIENCE TECHNICIAN 293 62 −231 −79%
0090-GUIDE 208 49 −159 −76%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 121 5 −116 −96%
0462-FORESTRY TECHNICIAN 113 1 −112 −99%
1640-FACILITY OPERATIONS SERVICES 104 3 −101 −97%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 97 7 −90 −93%
3502-LABORING 130 49 −81 −62%
2210-INFORMATION TECHNOLOGY MANAGEMENT 85 6 −79 −93%
0560-BUDGET ANALYSIS 82 6 −76 −93%
1101-GENERAL BUSINESS AND INDUSTRY 71 1 −70 −99%
0503-FINANCIAL CLERICAL AND ASSISTANCE 112 45 −67 −60%
0201-HUMAN RESOURCES MANAGEMENT 67 1 −66 −99%
0341-ADMINISTRATIVE OFFICER 66 1 −65 −98%
5716-ENGINEERING EQUIPMENT OPERATING 81 19 −62 −77%
0193-ARCHEOLOGY 61 4 −57 −93%
1601-EQUIPMENT FACILITIES, AND SERVICES 47 1 −46 −98%
0807-LANDSCAPE ARCHITECTURE 45 0 −45 −100%
0099-GENERAL STUDENT TRAINEE 56 12 −44 −79%
5703-MOTOR VEHICLE OPERATING 58 17 −41 −71%
0028-ENVIRONMENTAL PROTECTION SPECIALIST 41 0 −41 −100%
1301-GENERAL PHYSICAL SCIENCE 36 1 −35 −97%
1702-EDUCATION AND TRAINING TECHNICIAN 50 17 −33 −66%
3566-CUSTODIAL WORKING 51 19 −32 −63%
0170-HISTORY 32 1 −31 −97%
1102-CONTRACTING 31 0 −31 −100%
2151-DISPATCHING 76 46 −30 −40%
0810-CIVIL ENGINEERING 31 2 −29 −94%
0343-MANAGEMENT AND PROGRAM ANALYSIS 33 5 −28 −85%
0408-ECOLOGY 31 3 −28 −90%
1015-MUSEUM CURATOR 28 1 −27 −96%
0501-FINANCIAL ADMINISTRATION AND PROGRAM 29 3 −26 −90%
1016-MUSEUM SPECIALIST AND TECHNICIAN 46 20 −26 −56%
3603-MASONRY 37 11 −26 −70%
0023-OUTDOOR RECREATION PLANNING 29 3 −26 −90%
0808-ARCHITECTURE 26 1 −25 −96%
5001-MISCELLANEOUS PLANT AND ANIMAL WORK 30 6 −24 −80%
0102-SOCIAL SCIENCE AID AND TECHNICIAN 36 13 −23 −64%
1084-VISUAL INFORMATION 34 11 −23 −68%
4607-CARPENTRY 25 3 −22 −88%
0801-GENERAL ENGINEERING 22 0 −22 −100%
1603-EQUIPMENT, FACILITIES, AND SERVICES ASSISTANCE 26 5 −21 −81%
1035-PUBLIC AFFAIRS 21 0 −21 −100%
5003-GARDENING 29 9 −20 −69%
0203-HUMAN RESOURCES ASSISTANCE 21 1 −20 −95%
0399-ADMINISTRATION AND OFFICE SUPPORT STUDENT TRAINEE 19 0 −19 −100%
4701-MISC GENERAL MAINTENANCE & OPERATIONS WORK 29 10 −19 −65%
0189-RECREATION AID AND ASSISTANT 34 16 −18 −53%
5705-TRACTOR OPERATING 28 10 −18 −64%
1712-TRAINING INSTRUCTION 18 1 −17 −94%
0456-WILDLAND FIRE MANAGEMENT 94 79 −16 −16%
0340-PROGRAM MANAGEMENT 23 7 −16 −70%
1311-PHYSICAL SCIENCE TECHNICIAN 19 4 −15 −79%
0561-BUDGET CLERICAL AND ASSISTANCE 16 1 −15 −94%
1170-REALTY 18 3 −15 −83%
5803-HEAVY MOBILE EQUIPMENT MECHANIC 15 1 −14 −93%
1010-EXHIBITS SPECIALIST 15 1 −14 −93%
2805-ELECTRICIAN 15 1 −14 −93%
5042-TREE TRIMMING AND REMOVING 16 2 −14 −87%
5823-AUTOMOTIVE MECHANIC 16 3 −13 −82%
0486-WILDLIFE BIOLOGY 12 0 −12 −100%
0018-SAFETY AND OCCUPATIONAL HEALTH MANAGEMENT 24 12 −12 −51%
0190-GENERAL ANTHROPOLOGY 12 1 −11 −91%
0020-COMMUNITY PLANNING 11 0 −11 −100%
2005-SUPPLY CLERICAL AND TECHNICIAN 12 1 −11 −91%
3501-MISC GENERAL SERVICES AND SUPPORT WORK 12 1 −11 −92%
5406-UTILITY SYSTEMS OPERATING 11 1 −10 −91%
0809-CONSTRUCTION CONTROL TECHNICAL 11 1 −10 −91%
0499-BIOLOGICAL SCIENCE STUDENT TRAINEE 12 3 −9 −74%
0101-SOCIAL SCIENCE 9 0 −9 −100%
1001-GENERAL ARTS AND INFORMATION 11 2 −9 −81%
1109-GRANTS MANAGEMENT 10 1 −9 −90%
0802-ENGINEERING TECHNICAL 8 0 −8 −100%
0318-SECRETARY 8 0 −8 −100%
5786-SMALL CRAFT OPERATING 13 5 −8 −61%
0391-TELECOMMUNICATIONS 11 3 −8 −73%
1315-HYDROLOGY 8 0 −8 −100%
1171-APPRAISING 8 0 −8 −100%
0080-SECURITY ADMINISTRATION 9 2 −7 −78%
5048-ANIMAL CARETAKING 7 0 −7 −100%
0180-PSYCHOLOGY 6 0 −6 −100%
2101-TRANSPORTATION SPECIALIST 6 0 −6 −100%
1371-CARTOGRAPHIC TECHNICIAN 12 6 −6 −51%
1350-GEOLOGY 6 0 −6 −100%
1420-ARCHIVIST 9 3 −6 −66%
0260-EQUAL EMPLOYMENT OPPORTUNITY 5 0 −5 −100%
4206-PLUMBING 6 1 −5 −82%
1370-CARTOGRAPHY 6 1 −5 −84%
2181-AIRCRAFT OPERATION 6 1 −5 −83%
1701-GENERAL EDUCATION AND TRAINING 6 1 −5 −83%
0830-MECHANICAL ENGINEERING 5 0 −5 −100%
4742-UTILITY SYSTEMS REPAIRING-OPERATING 41 46 5 13%
0640-HEALTH AID AND TECHNICIAN 14 9 −5 −34%
0081-FIRE PROTECTION AND PREVENTION 4 8 4 115%
5409-WATER TREATMENT PLANT OPERATING 4 0 −4 −100%
2003-SUPPLY PROGRAM MANAGEMENT 4 0 −4 −100%
0510-ACCOUNTING 4 0 −4 −100%
1373-LAND SURVEYING 4 0 −4 −100%
0437-HORTICULTURE 4 0 −4 −100%
0430-BOTANY 4 0 −4 −100%
0599-FINANCIAL MANAGEMENT STUDENT TRAINEE 4 1 −4 −78%
0899-ENGINEERING AND ARCHITECTURE STUDENT TRAINEE 4 0 −4 −100%
0199-SOCIAL SCIENCE STUDENT TRAINEE 3 0 −3 −100%
2810-HIGH VOLTAGE ELECTRICIAN 3 0 −3 −100%
0083-POLICE 5 8 3 47%
4102-PAINTING 8 5 −3 −40%
5026-PEST CONTROLLING 3 0 −3 −100%
1560-DATA SCIENCE SERIES 3 0 −3 −100%
1811-CRIMINAL INVESTIGATION 3 6 3 100%
1421-ARCHIVES TECHNICIAN 5 2 −3 −59%
5306-AIR CONDITIONING EQUIPMENT MECHANIC 3 0 −3 −100%
0856-ELECTRONICS TECHNICAL 3 0 −3 −100%
2299-INFORMATION TECHNOLOGY STUDENT TRAINEE 3 0 −3 −100%
1082-WRITING AND EDITING 3 0 −3 −100%
4605-WOOD CRAFTING 3 0 −3 −100%
1173-HOUSING MANAGEMENT 6 3 −3 −49%
0085-SECURITY GUARD 5 2 −3 −61%
0850-ELECTRICAL ENGINEERING 4 1 −3 −72%
0150-GEOGRAPHY 2 0 −2 −100%
0505-FINANCIAL MANAGEMENT 2 0 −2 −100%
0819-ENVIRONMENTAL ENGINEERING 2 0 −2 −100%
0855-ELECTRONICS ENGINEERING 2 0 −2 −100%
0308-RECORDS AND INFORMATION MANAGEMENT 2 0 −2 −100%
1750-INSTRUCTIONAL SYSTEMS 2 0 −2 −100%
1799-EDUCATION STUDENT TRAINEE 2 0 −2 −100%
1160-FINANCIAL ANALYSIS 2 0 −2 −100%
1105-PURCHASING 2 0 −2 −100%
0306-GOVERNMENT INFORMATION SPECIALIST 4 1 −2 −71%
0454-RANGELAND MANAGEMENT 2 0 −2 −100%
0299-HUMAN RESOURCES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
1910-QUALITY ASSURANCE 2 0 −2 −100%
4104-SIGN PAINTING 6 4 −2 −28%
0136-INTERNATIONAL COOPERATION 2 0 −2 −100%
5701-MISC TRANSPORTATION/MOBILE EQUIPMENT OPER 2 0 −2 −100%
0346-LOGISTICS MANAGEMENT 4 1 −2 −71%
2604-ELECTRONICS MECHANIC 2 0 −2 −100%
0335-COMPUTER CLERK AND ASSISTANT 3 1 −2 −68%
1106-PROCUREMENT CLERICAL AND TECHNICIAN 2 0 −2 −100%
0482-FISH BIOLOGY 4 2 −2 −46%
1699-EQUIPMENT AND FACILITIES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
0828-CONSTRUCTION ANALYST 2 0 −2 −100%
1099-INFORMATION AND ARTS STUDENT TRAINEE 2 0 −2 −100%
6907-MATERIALS HANDLER 2 0 −2 −100%
0701-VETERINARY MEDICAL SCIENCE 2 0 −2 −100%
0803-SAFETY ENGINEERING 2 0 −2 −100%
5334-MARINE MACHINERY MECHANIC 2 0 −2 −100%
0455-RANGE TECHNICIAN 3 4 1 56%
0804-FIRE PROTECTION ENGINEERING 2 1 −1 −50%
5801-MISC TRANSPORTATION/MOBILE EQUIPMT MAINTNE 2 1 −1 −50%
7404-COOKING 2 1 −1 −56%
0326-OFFICE AUTOMATION CLERICAL AND ASSISTANCE 5 4 −1 −15%
1341-METEOROLOGICAL TECHNICIAN 3 2 −1 −26%
5788-DECKHAND 3 2 −1 −33%
1316-HYDROLOGIC TECHNICIAN 5 5 0 −8%
2150-TRANSPORTATION OPERATIONS 2 2 0 −20%
Source: github.com/abigailhaddad/usajobs_historical
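To make the table's arithmetic concrete: each row compares the 2025 count for a series against its 2018-2024 per-year average over the same months. Using the rounded Park Ranger (0025) figures from the table above:

# Park Ranger (series 0025), Jan-Dec comparison, figures as rounded in the table
avg_historical = 750                              # mean postings per year, 2018-2024
count_2025 = 233                                  # postings in 2025
difference = count_2025 - avg_historical          # -517
pct_change = difference / avg_historical * 100    # about -69%
print(f"{difference:+}, {pct_change:.0f}%")       # -517, -69%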

Are There More Group Announcements in 2025?

Code
def categorize_openings(opening_val):
    """Normalize totalOpenings values: keep text categories (Many, Few, Several), reduce numbers to integers"""
    opening_str = str(opening_val).upper()
    if opening_str in ['MANY', 'FEW', 'SEVERAL']:
        return opening_str.title()
    else:
        try:
            return str(int(float(opening_val)))
        except (ValueError, TypeError):
            return 'Other'

def analyze_opening_types():
    """Compare totalOpenings categories for the comparison period, 2018-2024 vs 2025"""
    # Use pre-filtered comparison period datasets and filter for jobs with opening data
    openings_2025 = nps_2025_comparison[nps_2025_comparison['totalOpenings'].notna()].copy()
    openings_historical = nps_historical_comparison[nps_historical_comparison['totalOpenings'].notna()].copy()
    
    # Apply categorization
    openings_2025.loc[:, 'opening_category'] = openings_2025['totalOpenings'].apply(categorize_openings)
    openings_historical.loc[:, 'opening_category'] = openings_historical['totalOpenings'].apply(categorize_openings)
    
    # Get top 10 categories for each period
    hist_top10 = openings_historical['opening_category'].value_counts().head(10)
    curr_top10 = openings_2025['opening_category'].value_counts().head(10)
    
    # Calculate percentages
    hist_pcts = (hist_top10 / len(openings_historical) * 100).round(0)
    curr_pcts = (curr_top10 / len(openings_2025) * 100).round(0)
    
    # Create simple comparison table
    comparison_data = []
    all_categories = set(hist_top10.index) | set(curr_top10.index)

    for category in sorted(all_categories):
        hist_pct = hist_pcts.get(category, 0)
        curr_pct = curr_pcts.get(category, 0)
        comparison_data.append({
            'Total Openings': category,
            'Historical\n%': hist_pct,
            '2025\n%': curr_pct,
            'Change': round(curr_pct - hist_pct, 0)
        })
    
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df = comparison_df.sort_values('Historical\n%', ascending=False)
    
    # Display with Great Tables
    gt_openings_df = comparison_df.reset_index(drop=True)
    
    gt_openings = (
        create_standard_gt_table(
            data=gt_openings_df,
            title="National Park Service: Top 10 Total Openings Comparison",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025",
            align_left_cols=["Total Openings"],
            align_center_cols=["Historical\n%", "2025\n%", "Change"],
            col_widths={
                "Total Openings": "35%",
                "Historical\n%": "22%",
                "2025\n%": "22%",
                "Change": "21%"
            }
        )
        .fmt_number(
            columns=["Historical\n%", "2025\n%", "Change"],
            decimals=0,
            pattern="{x}%"
        )
        .data_color(
            columns=["Change"],
            palette=["red", "white", "green"],
            domain=[-25, 25]
        )
    )
    
    gt_openings.show()

# Run the analysis
analyze_opening_types()
National Park Service: Top 10 Total Openings Comparison
Jan-Dec 2018-2024 vs Jan-Dec 2025 | USAJobs Historical Data
Total Openings Historical % 2025 % Change
1 65% 49% −16%
2 10% 16% 6%
Many 9% 16% 7%
Few 5% 4% −1%
3 3% 4% 1%
4 2% 3% 1%
6 1% 2% 1%
10 1% 0% −1%
5 1% 1% 0%
12 0% 1% 1%
8 0% 1% 1%
7 0% 0% 0%
Source: github.com/abigailhaddad/usajobs_historical
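For reference, categorize_openings keeps the text values USAJobs uses for totalOpenings (Many, Few, Several), collapses numeric values to whole numbers, and buckets anything else as Other. A few illustrative calls (the inputs are made up, but follow the formats the field contains):

print(categorize_openings('MANY'))  # 'Many'
print(categorize_openings('Few'))   # 'Few'
print(categorize_openings('3'))     # '3'
print(categorize_openings(2.0))     # '2'
print(categorize_openings('N/A'))   # 'Other'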

Code
from IPython.display import display, Markdown
dt_info = get_current_datetime()
display(Markdown(f"*Analysis generated on {dt_info['formatted']}*"))

Analysis generated on 2026-01-03 10:09:36