National Park Service Job Postings Analysis

Author: Abigail Haddad

Published: September 3, 2025

This analysis examines federal job posting trends for the National Park Service from 2018 through the last complete month of available data.

Resources: GitHub Repository | Field Documentation | USAJobs API

Data Loading and Preparation

Code
import pandas as pd
import numpy as np
from datetime import datetime
import calendar
import json

from great_tables import GT, md

def create_standard_gt_table(data, title, subtitle="", align_left_cols=None, align_center_cols=None, col_widths=None, include_source=True):
    """Create a standardized Great Tables table with common formatting"""
    # Add USAJobs data attribution to subtitle if not already included
    if include_source and subtitle and "USAJobs" not in subtitle:
        subtitle = f"{subtitle} | USAJobs Historical Data"
    elif include_source and not subtitle:
        subtitle = "USAJobs Historical Data"
    
    # Start with basic table
    gt_table = GT(data.reset_index(drop=True))
    gt_table = gt_table.tab_header(title=title, subtitle=subtitle)
    gt_table = gt_table.tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    
    # Apply alignments
    if align_left_cols:
        gt_table = gt_table.cols_align(align="left", columns=align_left_cols)
    if align_center_cols:
        gt_table = gt_table.cols_align(align="center", columns=align_center_cols)
    
    # Apply widths
    if col_widths:
        gt_table = gt_table.cols_width(col_widths)
    
    # Apply options without width constraints
    gt_table = gt_table.tab_options(
        quarto_disable_processing=True
    )
    
    return gt_table

def get_current_datetime():
    """Get current date and time info for consistent usage"""
    current_date = datetime.now()
    return {
        'date': current_date,
        'year': current_date.year,
        'month': current_date.month,
        'formatted': current_date.strftime('%Y-%m-%d %H:%M:%S')
    }

def extract_series(job_categories):
    """Extract occupational series from JobCategories JSON field"""
    try:
        if pd.isna(job_categories):
            return 'Unknown'
        categories = json.loads(job_categories)
        if categories and 'series' in categories[0]:
            return categories[0]['series']
        return 'Unknown'
    except (ValueError, TypeError, KeyError, IndexError):
        return 'Unknown'

def categorize_appointment(appt_type):
    """Categorize appointment types into Permanent, Term/Temporary, or Other"""
    if appt_type == 'Permanent':
        return 'Permanent'
    elif appt_type in ['Term', 'Temporary', 'Seasonal', 'Summer', 'Intermittent', 'Internships']:
        return 'Term/Temporary'
    else:
        return 'Other'

def load_nps_data():
    """Load and prepare National Park Service job data"""
    # Load all years from 2018 onwards
    years = range(2018, 2026)
    all_data = []
    year_counts = []
    
    for year in years:
        # Control numbers from this year's historical file, used to deduplicate current postings
        historical_control_numbers = set()

        # Load historical data
        try:
            df = pd.read_parquet(f'../../data/historical_jobs_{year}.parquet')
            historical_control_numbers = set(df['usajobsControlNumber'])
            year_counts.append({'Year': year, 'Jobs Loaded': f"{len(df):,}"})
            all_data.append(df)
        except FileNotFoundError:
            year_counts.append({'Year': year, 'Jobs Loaded': "No data"})
        
        # Load current data if available and deduplicate against this year's historical file
        try:
            current_df = pd.read_parquet(f'../../data/current_jobs_{year}.parquet')
            if len(current_df) > 0:
                # Keep only current postings whose usajobsControlNumber is not already loaded
                new_current_jobs = current_df[~current_df['usajobsControlNumber'].isin(historical_control_numbers)]
                if len(new_current_jobs) > 0:
                    all_data.append(new_current_jobs)
                    year_counts[-1]['Jobs Loaded'] += f" + {len(new_current_jobs):,} current"
        except FileNotFoundError:
            pass
    
    # Create data loading summary table
    loading_summary = pd.DataFrame(year_counts)
    
    # Combine all years
    combined_df = pd.concat(all_data, ignore_index=True)
    
    # Convert dates with mixed format handling
    combined_df['positionOpenDate'] = pd.to_datetime(combined_df['positionOpenDate'], format='mixed')
    combined_df['year'] = combined_df['positionOpenDate'].dt.year
    combined_df['month'] = combined_df['positionOpenDate'].dt.month
    
    # Dynamically determine the last complete month
    # If we're on the 2nd of the month or later, consider the previous month complete
    today = datetime.now()
    if today.day >= 2:
        last_complete_year = today.year
        last_complete_month = today.month - 1
        if last_complete_month == 0:
            last_complete_month = 12
            last_complete_year = today.year - 1
    else:
        # If it's the 1st, use two months ago as the last complete month
        last_complete_year = today.year
        last_complete_month = today.month - 2
        if last_complete_month <= 0:
            last_complete_month = 12 + last_complete_month
            last_complete_year = today.year - 1
    
    # Filter to only include data through the last complete month
    combined_df = combined_df[
        (combined_df['year'] < last_complete_year) | 
        ((combined_df['year'] == last_complete_year) & (combined_df['month'] <= last_complete_month))
    ].copy()
    
    print(f"Data includes postings through {calendar.month_name[last_complete_month]} {last_complete_year}")
    
    # Filter for National Park Service
    nps_df = combined_df[combined_df['hiringAgencyName'] == 'National Park Service'].copy()
    
    # Extract occupational series and categorize appointments
    nps_df['occupational_series'] = nps_df['JobCategories'].apply(extract_series)
    nps_df['appt_category'] = nps_df['appointmentType'].apply(categorize_appointment)
    
    # Create summary stats
    loading_stats = pd.DataFrame({
        'Metric': ['Total jobs loaded', 'National Park Service jobs', 'Data coverage'],
        'Value': [
            f"{len(combined_df):,}",
            f"{len(nps_df):,}",
            f"{len(year_counts)} years (2018-{calendar.month_name[last_complete_month]} {last_complete_year})"
        ]
    })
    
    return nps_df, loading_summary, loading_stats

# Load data
nps_df, loading_summary, loading_stats = load_nps_data()

# Create filtered datasets for year-over-year comparison
# For 2025, use all available months; for historical, use the same months
max_2025_month = nps_df[nps_df['year'] == 2025]['month'].max() if len(nps_df[nps_df['year'] == 2025]) > 0 else 12
comparison_months = list(range(1, max_2025_month + 1))
month_names = [calendar.month_name[m][:3] for m in comparison_months]
comparison_period = f"{month_names[0]}-{month_names[-1]}"

# Filter datasets to same months for fair comparison
nps_comparison_months = nps_df[nps_df['month'].isin(comparison_months)].copy()
nps_2025_comparison = nps_comparison_months[nps_comparison_months['year'] == 2025]
nps_historical_comparison = nps_comparison_months[nps_comparison_months['year'].between(2018, 2024)]

# Display data loading summary as Great Table
gt_loading_stats = (
    GT(loading_stats.reset_index(drop=True))
    .tab_header(
        title="Data Loading & Filtering Summary",
        subtitle="USAJobs Data Processing Results"
    )
    .cols_align(
        align="left",
        columns=["Metric"]
    )
    .cols_align(
        align="center",
        columns=["Value"]
    )
    .cols_width({
        "Metric": "60%",
        "Value": "40%"
    })
    .tab_options(quarto_disable_processing=True)
)
gt_loading_stats.show()

# Show appointment type categorization as Great Table
appt_breakdown = pd.DataFrame({
    'Appointment Type': nps_df['appointmentType'].value_counts().index,
    'Count': nps_df['appointmentType'].value_counts().values,
    'Category': [categorize_appointment(x) for x in nps_df['appointmentType'].value_counts().index]
})

gt_appt = (
    create_standard_gt_table(
        data=appt_breakdown,
        title="Appointment Type Categorization",
        subtitle="National Park Service Job Types (2018-2025)",
        align_left_cols=["Appointment Type", "Category"],
        align_center_cols=["Count"],
        col_widths={"Appointment Type": "45%", "Count": "20%", "Category": "35%"}
    )
    .fmt_number(columns=["Count"], sep_mark=",", decimals=0)
)
gt_appt.show()
Data includes postings through August 2025
Data Loading & Filtering Summary
USAJobs Data Processing Results
Metric Value
Total jobs loaded 2,752,190
National Park Service jobs 38,295
Data coverage 8 years (2018-August 2025)
Appointment Type Categorization
National Park Service Job Types (2018-2025) | USAJobs Historical Data
Appointment Type Count Category
Permanent 22,523 Permanent
Temporary 10,891 Term/Temporary
Term 2,108 Term/Temporary
Internships 791 Term/Temporary
Multiple 668 Other
Temporary Promotion 623 Other
Seasonal 523 Term/Temporary
Recent graduates 81 Other
ICTAP Only 38 Other
Agency Employees Only 22 Other
Detail 17 Other
Summer 4 Term/Temporary
Temporary promotion 4 Other
Intermittent 2 Term/Temporary
Source: github.com/abigailhaddad/usajobs_historical
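
The month-completeness rule inside load_nps_data (once we're past the 1st of a month, treat the previous month as complete; on the 1st itself, step back two months) is easy to misread inline. Here is a minimal standalone sketch of the same rule with example dates; the helper name last_complete_month is mine and is not part of the pipeline.

from datetime import date

def last_complete_month(today: date) -> tuple:
    """Mirror the rule in load_nps_data: previous month once past the 1st, otherwise two months back."""
    steps_back = 1 if today.day >= 2 else 2
    year, month = today.year, today.month - steps_back
    if month <= 0:
        month += 12
        year -= 1
    return year, month

print(last_complete_month(date(2025, 9, 3)))   # (2025, 8): August 2025, matching the output above
print(last_complete_month(date(2026, 1, 1)))   # (2025, 11): on the 1st, step back two months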

Monthly Hiring Heatmaps

Code
# Get current date to limit display
dt_info = get_current_datetime()
current_year = dt_info['year']
current_month = dt_info['month']

# Infer the last complete month from the already-filtered data (max year/month present after loading)
# This is used to mask future months in the heatmaps below
last_complete_info = nps_df['year'].max(), nps_df[nps_df['year'] == nps_df['year'].max()]['month'].max()

def should_show_month(year, month):
    last_year, last_month = last_complete_info
    if year < last_year:
        return True
    elif year == last_year:
        return month <= last_month
    else:
        return False

# Constants
MONTH_LABELS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 
                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def create_heatmap_table(df_subset, title, subtitle=""):
    """Create a Great Tables heatmap-style table"""
    monthly_breakdown = df_subset.groupby(['year', 'month']).size().reset_index(name='job_count')
    monthly_pivot = monthly_breakdown.pivot(index='month', columns='year', values='job_count').fillna(0)
    
    # Mask future months
    for year in monthly_pivot.columns:
        for month in monthly_pivot.index:
            if not should_show_month(year, month):
                monthly_pivot.loc[month, year] = np.nan
    
    # Add month names
    monthly_pivot.index = MONTH_LABELS
    
    # Reset index to make month a column
    monthly_pivot_reset = monthly_pivot.reset_index()
    monthly_pivot_reset.columns.name = None
    monthly_pivot_reset = monthly_pivot_reset.rename(columns={'index': 'Month'})
    
    # Get year columns for formatting - convert to strings to ensure proper handling
    year_cols = [str(col) for col in monthly_pivot_reset.columns if str(col) != 'Month']
    
    # Create color scale values for the data
    max_val = monthly_pivot.max().max()
    
    # Rename columns to strings for Great Tables
    monthly_pivot_reset.columns = [str(col) for col in monthly_pivot_reset.columns]
    
    # Keep subtitle as-is for heatmaps (they already have repo link in footnote)
    
    gt_heatmap = (
        GT(monthly_pivot_reset)
        .tab_header(title=title, subtitle=subtitle)
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .fmt_number(columns=year_cols, decimals=0, sep_mark=",")
        .data_color(
            columns=year_cols,
            palette=["white", "orange", "darkred"],
            domain=[0, max_val],
            na_color="lightgray"
        )
        .cols_align(align="center", columns=year_cols)
        .cols_align(align="left", columns=["Month"])
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_heatmap.show()

# Create heatmap summary data
def create_appointment_summary(df):
    """Create summary of job counts by appointment category"""
    permanent_count = len(df[df['appt_category'] == 'Permanent'])
    temp_count = len(df[df['appt_category'] == 'Term/Temporary'])
    total_count = len(df)
    
    return pd.DataFrame({
        'Category': ['All NPS Positions', 'Permanent Positions', 'Term/Temporary Positions'],
        'Job Count': [f"{total_count:,}", f"{permanent_count:,}", f"{temp_count:,}"],
        'Percentage': ["100%", f"{permanent_count/total_count*100:.0f}%", f"{temp_count/total_count*100:.0f}%"]
    })

heatmap_summary = create_appointment_summary(nps_df)
gt_heatmap_summary = create_standard_gt_table(
    data=heatmap_summary,
    title="Heatmap Categories Summary",
    subtitle="National Park Service Job Distribution by Appointment Type",
    align_left_cols=["Category"],
    align_center_cols=["Job Count", "Percentage"],
    col_widths={"Category": "50%", "Job Count": "25%", "Percentage": "25%"}
)
gt_heatmap_summary.show()

# 1. All NPS jobs
create_heatmap_table(nps_df, 
                    "National Park Service - All USAJobs Postings by Month and Year",
                    "")

# 2. Permanent positions only
permanent_df = nps_df[nps_df['appt_category'] == 'Permanent']
create_heatmap_table(permanent_df, 
                    "National Park Service - Permanent USAJobs Positions",
                    "")

# 3. Term/Temporary positions only  
temp_df = nps_df[nps_df['appt_category'] == 'Term/Temporary']
create_heatmap_table(temp_df, 
                    "National Park Service - Term/Temporary USAJobs Positions",
                    "")
Heatmap Categories Summary
National Park Service Job Distribution by Appointment Type | USAJobs Historical Data
Category Job Count Percentage
All NPS Positions 38,295 100%
Permanent Positions 22,523 59%
Term/Temporary Positions 14,319 37%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - All USAJobs Postings by Month and Year
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 739 220 727 288 549 574 512 326
Feb 563 621 459 345 424 482 401 58
Mar 459 537 451 437 600 503 448 104
Apr 432 513 342 371 509 436 432 119
May 359 405 343 324 445 434 398 81
Jun 361 351 338 323 430 424 344 84
Jul 342 480 350 368 327 426 427 134
Aug 346 449 341 313 494 454 387 108
Sep 299 359 311 332 350 370 400
Oct 371 412 371 349 415 419 526
Nov 388 436 344 465 559 549 554
Dec 500 632 844 717 632 522 698
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Permanent USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 181 55 272 116 252 303 255 168
Feb 180 201 228 212 265 308 228 1
Mar 179 268 289 271 412 351 293 7
Apr 219 322 260 269 359 323 285 37
May 205 290 277 272 346 341 284 38
Jun 248 258 262 272 359 326 243 43
Jul 242 343 275 281 247 305 276 56
Aug 256 348 263 236 392 358 266 39
Sep 212 281 247 251 293 237 236
Oct 236 282 251 243 296 245 348
Nov 208 239 166 225 298 257 227
Dec 170 227 236 230 271 201 294
Source: github.com/abigailhaddad/usajobs_historical
National Park Service - Term/Temporary USAJobs Positions
Month 2018 2019 2020 2021 2022 2023 2024 2025
Jan 545 162 439 161 271 258 226 150
Feb 354 400 209 124 143 157 148 57
Mar 259 258 143 141 157 141 139 97
Apr 199 171 59 83 125 93 126 80
May 134 105 43 41 73 80 96 43
Jun 99 82 57 37 59 76 87 41
Jul 87 113 63 68 65 110 130 77
Aug 75 78 60 64 82 86 99 67
Sep 74 69 39 50 44 122 152
Oct 124 117 108 87 107 161 170
Nov 169 192 166 218 235 275 305
Dec 315 396 574 468 338 310 382
Source: github.com/abigailhaddad/usajobs_historical
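
The heatmap shading above is produced entirely by Great Tables' data_color. Stripped of the NPS pipeline, the technique looks like this; a minimal sketch on toy data (the counts below are invented purely for illustration).

import pandas as pd
from great_tables import GT

# Toy monthly counts, invented for illustration only
toy = pd.DataFrame({
    'Month': ['Jan', 'Feb', 'Mar'],
    '2024': [500, 400, 450],
    '2025': [300, 60, 100],
})

gt_toy = (
    GT(toy)
    .tab_header(title="Toy heatmap", subtitle="data_color shades each cell by its value")
    .data_color(
        columns=['2024', '2025'],                # shade only the year columns
        palette=["white", "orange", "darkred"],  # same palette as the tables above
        domain=[0, 500],                         # shared scale so both years use one color ramp
    )
    .cols_align(align="left", columns=["Month"])
)
gt_toy.show()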

2025 vs Previous Years: What’s Being Hired Less

Code
def analyze_occupation_changes():
    """Analyze Jan-Jun 2018-2024 vs Jan-Jun 2025 occupational series changes"""
    # Load occupation mapping
    occ_mapping = pd.read_csv('../DTocc.txt')
    occ_dict = dict(zip(occ_mapping['OCC'].astype(str).str.zfill(4), occ_mapping['OCCT']))
    
    # Create summary table first
    # Calculate average for historical period
    historical_avg = len(nps_historical_comparison) / 7  # 7 years (2018-2024)
    pct_change = ((len(nps_2025_comparison) - historical_avg) / historical_avg) * 100
    summary_data = pd.DataFrame({
        'Period': [f'2025 {comparison_period} jobs', f'Historical {comparison_period} average (2018-2024)', 'Change'],
        'Count': [f"{len(nps_2025_comparison):,}", f"{historical_avg:,.0f}", f"{pct_change:.0f}%"]
    })
    
    gt_scope = create_standard_gt_table(
        data=summary_data,
        title="Analysis Scope",
        subtitle=f"{comparison_period} Comparison Only",
        align_left_cols=["Period"],
        align_center_cols=["Count"],
        col_widths={"Period": "70%", "Count": "30%"}
    )
    gt_scope.show()
    
    # Get 2025 counts by occupational series for comparison period
    occ_2025 = nps_2025_comparison['occupational_series'].value_counts()
    
    # Get historical average by occupational series for comparison period
    occ_historical = nps_historical_comparison.groupby(['year', 'occupational_series']).size().reset_index(name='count')
    occ_historical_avg = occ_historical.groupby('occupational_series')['count'].mean()
    
    # Compare and find biggest changes
    comparison_data = []
    all_series = set(occ_2025.index) | set(occ_historical_avg.index)
    
    for series in all_series:
        if pd.notna(series) and series != 'Unknown':
            count_2025 = occ_2025.get(series, 0)
            avg_historical = occ_historical_avg.get(series, 0)
            
            if avg_historical >= 2:  # Only meaningful changes
                difference = count_2025 - avg_historical
                pct_change = ((count_2025 - avg_historical) / avg_historical) * 100 if avg_historical > 0 else 0
                occ_title = occ_dict.get(series, f"Series {series}")
                comparison_data.append({
                    'Occupation': occ_title,
                    'Historical\nAvg': round(avg_historical),
                    '2025\nActual': count_2025,
                    'Change': round(difference),
                    '% Change': round(pct_change, 0)
                })
    
    # Convert to DataFrame and sort by absolute change (biggest changes first)
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df['abs_change'] = abs(comparison_df['Change'])
    comparison_df = comparison_df.sort_values('abs_change', ascending=False)
    comparison_df = comparison_df.drop('abs_change', axis=1)
    
    return comparison_df

# Analyze changes
changes_df = analyze_occupation_changes()

# Display top 10
gt_df_top10 = changes_df.head(10).reset_index(drop=True)

# Create wider table for occupation names
gt_table_top10 = (
    GT(gt_df_top10.reset_index(drop=True))
    .tab_header(
        title="National Park Service: Top 10 Occupations by Biggest Changes",
        subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
    )
    .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
    .cols_align(align="left", columns=["Occupation"])
    .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
    .cols_width({
        "Occupation": "45%",
        "Historical\nAvg": "18%",
        "2025\nActual": "12%", 
        "Change": "12%",
        "% Change": "13%"
    })
    .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
    .fmt_number(columns=["2025\nActual"], decimals=0)
    .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
    .data_color(
        columns=["% Change"],
        palette=["red", "white", "green"],
        domain=[-100, 50]
    )
    .tab_options(quarto_disable_processing=True)
)

gt_table_top10.show()

# Show expandable section for all results if there are more than 10
if len(changes_df) > 10:
    from IPython.display import display, HTML
    
    # Create collapsible HTML section
    expand_html = f"""
    <details style="margin-top: 20px;">
    <summary style="cursor: pointer; font-weight: bold; padding: 10px; background-color: #f0f0f0; border-radius: 5px;">
    📋 Show all {len(changes_df)} occupations
    </summary>
    <div style="margin-top: 10px;">
    """
    
    display(HTML(expand_html))
    
    # Display all results
    gt_df_all = changes_df.reset_index(drop=True)
    
    # Create wider table for all occupations  
    gt_table_all = (
        GT(gt_df_all.reset_index(drop=True))
        .tab_header(
            title="National Park Service: All Occupational Changes",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025 | USAJobs Historical Data"
        )
        .tab_source_note(md("*Source: github.com/abigailhaddad/usajobs_historical*"))
        .cols_align(align="left", columns=["Occupation"])
        .cols_align(align="center", columns=["Historical\nAvg", "2025\nActual", "Change", "% Change"])
        .cols_width({
            "Occupation": "45%",
            "Historical\nAvg": "18%",
            "2025\nActual": "12%", 
            "Change": "12%",
            "% Change": "13%"
        })
        .fmt_number(columns=["Historical\nAvg", "Change"], decimals=0)
        .fmt_number(columns=["2025\nActual"], decimals=0)
        .fmt_number(columns=["% Change"], decimals=0, pattern="{x}%")
        .data_color(
            columns=["% Change"],
            palette=["red", "white", "green"],
            domain=[-100, 50]
        )
        .tab_options(quarto_disable_processing=True)
    )
    
    gt_table_all.show()
    
    display(HTML("</div></details>"))
Analysis Scope
Jan-Aug Comparison Only | USAJobs Historical Data
Period Count
2025 Jan-Aug jobs 1,014
Historical Jan-Aug average (2018-2024) 3,451
Change -71%
Source: github.com/abigailhaddad/usajobs_historical
National Park Service: Top 10 Occupations by Biggest Changes
Jan-Aug 2018-2024 vs Jan-Aug 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 448 119 −329 −73%
0025-PARK RANGER 459 185 −274 −60%
0303-MISCELLANEOUS CLERK AND ASSISTANT 195 47 −148 −76%
0404-BIOLOGICAL SCIENCE TECHNICIAN 163 60 −103 −63%
0090-GUIDE 127 38 −89 −70%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 82 3 −79 −96%
1640-FACILITY OPERATIONS SERVICES 73 2 −71 −97%
0462-FORESTRY TECHNICIAN 70 1 −69 −99%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 65 7 −58 −89%
2210-INFORMATION TECHNOLOGY MANAGEMENT 57 4 −53 −93%
Source: github.com/abigailhaddad/usajobs_historical
📋 Show all 148 occupations
National Park Service: All Occupational Changes
Jan-Aug 2018-2024 vs Jan-Aug 2025 | USAJobs Historical Data
Occupation Historical Avg 2025 Actual Change % Change
4749-MAINTENANCE MECHANIC 448 119 −329 −73%
0025-PARK RANGER 459 185 −274 −60%
0303-MISCELLANEOUS CLERK AND ASSISTANT 195 47 −148 −76%
0404-BIOLOGICAL SCIENCE TECHNICIAN 163 60 −103 −63%
0090-GUIDE 127 38 −89 −70%
0401-GENERAL NATURAL RESOURCES MANAGEMENT AND BIOLOGICAL SCIENCES 82 3 −79 −96%
1640-FACILITY OPERATIONS SERVICES 73 2 −71 −97%
0462-FORESTRY TECHNICIAN 70 1 −69 −99%
0301-MISCELLANEOUS ADMINISTRATION AND PROGRAM 65 7 −58 −89%
2210-INFORMATION TECHNOLOGY MANAGEMENT 57 4 −53 −93%
0560-BUDGET ANALYSIS 56 6 −50 −89%
3502-LABORING 93 46 −47 −51%
1101-GENERAL BUSINESS AND INDUSTRY 47 1 −46 −98%
0456-WILDLAND FIRE MANAGEMENT 109 63 −46 −42%
0341-ADMINISTRATIVE OFFICER 46 1 −45 −98%
0201-HUMAN RESOURCES MANAGEMENT 45 1 −44 −98%
5716-ENGINEERING EQUIPMENT OPERATING 55 15 −40 −73%
0193-ARCHEOLOGY 40 4 −36 −90%
0503-FINANCIAL CLERICAL AND ASSISTANCE 65 31 −34 −52%
1601-EQUIPMENT FACILITIES, AND SERVICES 32 0 −32 −100%
0807-LANDSCAPE ARCHITECTURE 32 0 −32 −100%
5703-MOTOR VEHICLE OPERATING 39 12 −27 −69%
0028-ENVIRONMENTAL PROTECTION SPECIALIST 26 0 −26 −100%
0099-GENERAL STUDENT TRAINEE 38 12 −26 −69%
1301-GENERAL PHYSICAL SCIENCE 27 3 −24 −89%
1102-CONTRACTING 23 0 −23 −100%
3566-CUSTODIAL WORKING 36 14 −22 −61%
0170-HISTORY 22 1 −21 −95%
0810-CIVIL ENGINEERING 22 1 −21 −95%
1015-MUSEUM CURATOR 19 1 −18 −95%
0408-ECOLOGY 21 3 −18 −86%
0023-OUTDOOR RECREATION PLANNING 20 3 −17 −85%
0343-MANAGEMENT AND PROGRAM ANALYSIS 21 4 −17 −81%
0501-FINANCIAL ADMINISTRATION AND PROGRAM 19 3 −16 −84%
1702-EDUCATION AND TRAINING TECHNICIAN 30 14 −16 −53%
1035-PUBLIC AFFAIRS 15 0 −15 −100%
0808-ARCHITECTURE 17 2 −15 −88%
0801-GENERAL ENGINEERING 15 0 −15 −100%
0102-SOCIAL SCIENCE AID AND TECHNICIAN 26 12 −14 −54%
1603-EQUIPMENT, FACILITIES, AND SERVICES ASSISTANCE 18 4 −14 −78%
4607-CARPENTRY 15 1 −14 −93%
3603-MASONRY 24 11 −13 −54%
0203-HUMAN RESOURCES ASSISTANCE 14 1 −13 −93%
0399-ADMINISTRATION AND OFFICE SUPPORT STUDENT TRAINEE 13 0 −13 −100%
2151-DISPATCHING 51 39 −12 −23%
5003-GARDENING 20 8 −12 −60%
1712-TRAINING INSTRUCTION 12 1 −11 −91%
1016-MUSEUM SPECIALIST AND TECHNICIAN 30 19 −11 −37%
0189-RECREATION AID AND ASSISTANT 26 15 −11 −43%
5001-MISCELLANEOUS PLANT AND ANIMAL WORK 16 6 −10 −62%
0340-PROGRAM MANAGEMENT 17 7 −10 −59%
2805-ELECTRICIAN 10 0 −10 −100%
1170-REALTY 12 3 −9 −76%
1084-VISUAL INFORMATION 20 11 −9 −44%
0018-SAFETY AND OCCUPATIONAL HEALTH MANAGEMENT 17 8 −9 −54%
3501-MISC GENERAL SERVICES AND SUPPORT WORK 10 1 −9 −90%
5803-HEAVY MOBILE EQUIPMENT MECHANIC 10 1 −9 −90%
4701-MISC GENERAL MAINTENANCE & OPERATIONS WORK 19 10 −9 −48%
0561-BUDGET CLERICAL AND ASSISTANCE 10 1 −9 −90%
5823-AUTOMOTIVE MECHANIC 11 3 −8 −72%
1010-EXHIBITS SPECIALIST 9 1 −8 −89%
5705-TRACTOR OPERATING 18 10 −8 −46%
0020-COMMUNITY PLANNING 8 0 −8 −100%
1171-APPRAISING 8 0 −8 −100%
0486-WILDLIFE BIOLOGY 8 0 −8 −100%
0802-ENGINEERING TECHNICAL 7 0 −7 −100%
1311-PHYSICAL SCIENCE TECHNICIAN 11 4 −7 −63%
1109-GRANTS MANAGEMENT 8 1 −7 −88%
5042-TREE TRIMMING AND REMOVING 9 2 −7 −78%
2005-SUPPLY CLERICAL AND TECHNICIAN 7 1 −6 −87%
0391-TELECOMMUNICATIONS 8 2 −6 −75%
0318-SECRETARY 6 0 −6 −100%
1001-GENERAL ARTS AND INFORMATION 7 1 −6 −85%
5406-UTILITY SYSTEMS OPERATING 7 1 −6 −85%
1315-HYDROLOGY 6 0 −6 −100%
0101-SOCIAL SCIENCE 6 0 −6 −100%
0809-CONSTRUCTION CONTROL TECHNICAL 7 1 −6 −85%
0190-GENERAL ANTHROPOLOGY 7 1 −6 −87%
0499-BIOLOGICAL SCIENCE STUDENT TRAINEE 8 3 −5 −63%
1350-GEOLOGY 5 0 −5 −100%
5409-WATER TREATMENT PLANT OPERATING 4 0 −4 −100%
0260-EQUAL EMPLOYMENT OPPORTUNITY 4 0 −4 −100%
0830-MECHANICAL ENGINEERING 4 0 −4 −100%
0081-FIRE PROTECTION AND PREVENTION 2 6 4 163%
0899-ENGINEERING AND ARCHITECTURE STUDENT TRAINEE 4 0 −4 −100%
2181-AIRCRAFT OPERATION 4 1 −4 −78%
2101-TRANSPORTATION SPECIALIST 4 0 −4 −100%
1701-GENERAL EDUCATION AND TRAINING 5 1 −4 −78%
4206-PLUMBING 4 0 −4 −100%
0454-RANGELAND MANAGEMENT 4 0 −4 −100%
0599-FINANCIAL MANAGEMENT STUDENT TRAINEE 5 1 −4 −79%
0085-SECURITY GUARD 5 1 −4 −79%
0199-SOCIAL SCIENCE STUDENT TRAINEE 3 0 −3 −100%
0430-BOTANY 3 0 −3 −100%
5048-ANIMAL CARETAKING 3 0 −3 −100%
5786-SMALL CRAFT OPERATING 8 5 −3 −41%
2299-INFORMATION TECHNOLOGY STUDENT TRAINEE 3 0 −3 −100%
0437-HORTICULTURE 3 0 −3 −100%
0080-SECURITY ADMINISTRATION 5 2 −3 −61%
1370-CARTOGRAPHY 4 1 −3 −76%
0510-ACCOUNTING 3 0 −3 −100%
4605-WOOD CRAFTING 3 0 −3 −100%
2003-SUPPLY PROGRAM MANAGEMENT 3 0 −3 −100%
0856-ELECTRONICS TECHNICAL 3 0 −3 −100%
0850-ELECTRICAL ENGINEERING 3 1 −2 −65%
1699-EQUIPMENT AND FACILITIES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
5026-PEST CONTROLLING 2 0 −2 −100%
0180-PSYCHOLOGY 2 0 −2 −100%
1176-BUILDING MANAGEMENT 2 0 −2 −100%
6907-MATERIALS HANDLER 2 0 −2 −100%
1105-PURCHASING 2 0 −2 −100%
1316-HYDROLOGIC TECHNICIAN 3 5 2 67%
1670-EQUIPMENT SERVICES 2 0 −2 −100%
1811-CRIMINAL INVESTIGATION 2 5 2 100%
4104-SIGN PAINTING 2 4 2 100%
0803-SAFETY ENGINEERING 2 0 −2 −100%
2810-HIGH VOLTAGE ELECTRICIAN 2 0 −2 −100%
2010-INVENTORY MANAGEMENT 2 0 −2 −100%
0083-POLICE 4 6 2 45%
0505-FINANCIAL MANAGEMENT 2 0 −2 −100%
0819-ENVIRONMENTAL ENGINEERING 2 0 −2 −100%
2102-TRANSPORTATION CLERK AND ASSISTANT 2 0 −2 −100%
5306-AIR CONDITIONING EQUIPMENT MECHANIC 2 0 −2 −100%
0855-ELECTRONICS ENGINEERING 2 0 −2 −100%
1082-WRITING AND EDITING 2 0 −2 −100%
0299-HUMAN RESOURCES MANAGEMENT STUDENT TRAINEE 2 0 −2 −100%
4102-PAINTING 6 4 −2 −32%
0394-COMMUNICATIONS CLERICAL 2 0 −2 −100%
0701-VETERINARY MEDICAL SCIENCE 2 0 −2 −100%
1373-LAND SURVEYING 2 0 −2 −100%
0019-SAFETY TECHNICIAN 2 0 −2 −100%
0346-LOGISTICS MANAGEMENT 2 1 −2 −60%
1420-ARCHIVIST 5 3 −2 −45%
0828-CONSTRUCTION ANALYST 2 0 −2 −100%
1371-CARTOGRAPHIC TECHNICIAN 8 6 −2 −24%
2150-TRANSPORTATION OPERATIONS 2 1 −1 −50%
4742-UTILITY SYSTEMS REPAIRING-OPERATING 29 28 −1 −4%
0335-COMPUTER CLERK AND ASSISTANT 2 1 −1 −50%
0804-FIRE PROTECTION ENGINEERING 2 1 −1 −50%
1173-HOUSING MANAGEMENT 4 3 −1 −16%
1421-ARCHIVES TECHNICIAN 3 2 −1 −33%
0455-RANGE TECHNICIAN 3 4 1 54%
0306-GOVERNMENT INFORMATION SPECIALIST 2 1 −1 −55%
0089-EMERGENCY MANAGEMENT SPECIALIST 2 1 −1 −50%
0326-OFFICE AUTOMATION CLERICAL AND ASSISTANCE 4 4 0 12%
5788-DECKHAND 2 2 0 −9%
0640-HEALTH AID AND TECHNICIAN 8 8 0 4%
0482-FISH BIOLOGY 2 2 0 −12%
Source: github.com/abigailhaddad/usajobs_historical
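
As a worked example of the arithmetic behind the Change and % Change columns, here is the top row (Maintenance Mechanic) reproduced by hand. The inputs come from the table above; the pipeline works from the unrounded historical average, so recomputed figures can differ by a posting or a point.

# Maintenance Mechanic (series 4749), Jan-Aug postings, values from the table above
historical_avg = 448   # average postings per year across 2018-2024 (rounded in the table)
count_2025 = 119

difference = count_2025 - historical_avg                            # -329
pct_change = (count_2025 - historical_avg) / historical_avg * 100   # about -73
print(f"{difference:+d} postings ({pct_change:.0f}%)")              # -329 postings (-73%)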

Are There More Group Announcements in 2025?

Code
def categorize_openings(opening_val):
    """Normalize totalOpenings values: Many/Few/Several keep their label, numeric values become integer strings, anything else is 'Other'"""
    opening_str = str(opening_val).upper()
    if opening_str in ['MANY', 'FEW', 'SEVERAL']:
        return opening_str.title()
    else:
        try:
            return str(int(float(opening_val)))
        except (ValueError, TypeError):
            return 'Other'

def analyze_opening_types():
    f"""Analyze opening types comparison between {comparison_period} 2018-2024 and {comparison_period} 2025"""
    # Use pre-filtered comparison period datasets and filter for jobs with opening data
    openings_2025 = nps_2025_comparison[nps_2025_comparison['totalOpenings'].notna()].copy()
    openings_historical = nps_historical_comparison[nps_historical_comparison['totalOpenings'].notna()].copy()
    
    # Apply categorization
    openings_2025.loc[:, 'opening_category'] = openings_2025['totalOpenings'].apply(categorize_openings)
    openings_historical.loc[:, 'opening_category'] = openings_historical['totalOpenings'].apply(categorize_openings)
    
    # Get top 10 categories for each period
    hist_top10 = openings_historical['opening_category'].value_counts().head(10)
    curr_top10 = openings_2025['opening_category'].value_counts().head(10)
    
    # Calculate percentages
    hist_pcts = (hist_top10 / len(openings_historical) * 100).round(0)
    curr_pcts = (curr_top10 / len(openings_2025) * 100).round(0)
    
    # Create simple comparison table
    comparison_data = []
    all_categories = set(hist_top10.index) | set(curr_top10.index)
    for category in sorted(all_categories):
        hist_pct = hist_pcts.get(category, 0)
        curr_pct = curr_pcts.get(category, 0)
        comparison_data.append({
            'Total Openings': category,
            'Historical\n%': hist_pct,
            '2025\n%': curr_pct,
            'Change': round(curr_pct - hist_pct, 0)
        })
    
    comparison_df = pd.DataFrame(comparison_data)
    comparison_df = comparison_df.sort_values('Historical\n%', ascending=False)
    
    # Display with Great Tables
    gt_openings_df = comparison_df.reset_index(drop=True)
    
    gt_openings = (
        create_standard_gt_table(
            data=gt_openings_df,
            title="National Park Service: Top 10 Total Openings Comparison",
            subtitle=f"{comparison_period} 2018-2024 vs {comparison_period} 2025",
            align_left_cols=["Total Openings"],
            align_center_cols=["Historical\n%", "2025\n%", "Change"],
            col_widths={
                "Total Openings": "35%",
                "Historical\n%": "22%",
                "2025\n%": "22%",
                "Change": "21%"
            }
        )
        .fmt_number(
            columns=["Historical\n%", "2025\n%", "Change"],
            decimals=0,
            pattern="{x}%"
        )
        .data_color(
            columns=["Change"],
            palette=["red", "white", "green"],
            domain=[-25, 25]
        )
    )
    
    gt_openings.show()

# Run the analysis
analyze_opening_types()
National Park Service: Top 10 Total Openings Comparison
Jan-Aug 2018-2024 vs Jan-Aug 2025 | USAJobs Historical Data
Total Openings Historical % 2025 % Change
1 70% 52% −18%
2 11% 16% 5%
Many 5% 12% 7%
Few 5% 4% −1%
3 4% 5% 1%
4 2% 3% 1%
6 1% 2% 1%
5 1% 1% 0%
12 0% 1% 1%
10 0% 0% 0%
8 0% 1% 1%
7 0% 0% 0%
Source: github.com/abigailhaddad/usajobs_historical
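
For reference, the row labels above come straight from categorize_openings: named categories keep their label, numeric totalOpenings values collapse to integer strings, and anything unparseable falls into 'Other'. A few illustrative inputs (the sample values, including 'TBD', are made up):

samples = ['MANY', 'Few', '1', '12.0', 'TBD']
for value in samples:
    print(f"{value!r:>8} -> {categorize_openings(value)}")
# Expected: 'Many', 'Few', '1', '12', 'Other'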

Code
from IPython.display import display, Markdown
dt_info = get_current_datetime()
display(Markdown(f"*Analysis generated on {dt_info['formatted']}*"))

Analysis generated on 2025-09-03 10:08:09