Skip to content
22 changes: 22 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Fixed

- Fix `ffnet` installation for Python 3.13+ and modern pip (26.x):
- Remove `--no-use-pep517` flag, which was dropped in pip 23.1
- Add `--no-build-isolation` for Python 3.13+ so that `numpy` (required at
Fortran compile time) is visible to pip's build backend instead of being
hidden inside an isolated sandbox
- Simplify misleading echo message in the ffnet install block
- Remove `--show-channel-urls` flag from `mamba list` calls; the flag is not
supported by mamba 2.x and caused the end-of-install package listing to be
skipped with a spurious warning

### Changed

- Update example Python version to 3.14
- Update example Miniforge version to 26.1.0-0
- Disable TensorFlow installation for Python 3.14 (see https://github.com/tensorflow/tensorflow/issues/102890)

### Added

- Explicit Conda Packages
- basemap (latest version for Python 3.14 support only on conda-forge)
- Explicit Pip Packages
- python-docx

### Removed

- Explicit Pip Packages
- basemap (latest version for Python 3.14 support only on conda-forge)

### Deprecated

## [25.3.1] - 2025-10-02
Expand Down
34 changes: 25 additions & 9 deletions install_miniforge.bash
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,8 @@ fi
# Usage
# -----

# Example versions shown in the usage/help text (kept in sync with the
# CHANGELOG: Python 3.14, Miniforge 26.1.0-0).
EXAMPLE_PY_VERSION="3.14"
EXAMPLE_MINI_VERSION="26.1.0-0"
EXAMPLE_INSTALLDIR="/opt/GEOSpyD"
EXAMPLE_DATE=$(date +%F)
usage() {
Expand Down Expand Up @@ -605,6 +605,8 @@ $PACKAGE_INSTALL uxarray

$PACKAGE_INSTALL rasterio contextily

# basemap is installed via conda rather than pip: per the CHANGELOG, the
# latest basemap with Python 3.14 support is only published on conda-forge.
$PACKAGE_INSTALL basemap

# Only install pythran on linux. On mac it brings in an old clang
if [[ $MINIFORGE_ARCH == Linux ]]
then
Expand Down Expand Up @@ -651,26 +653,34 @@ $PIP_INSTALL PyRTF3 pipenv pymp-pypi h5py
$PIP_INSTALL pycircleci metpy siphon questionary xgrads
$PIP_INSTALL ruamel.yaml
$PIP_INSTALL xgboost

# Tensorflow does not support Python 3.14 yet
# https://github.com/tensorflow/tensorflow/issues/102890
# NOTE: tensorflow must ONLY be installed inside this guard; an
# unconditional install above it would defeat the version check.
if [[ $PYTHON_VER_WITHOUT_DOT -ge 314 ]]
then
  echo "Skipping tensorflow installation as Python $PYTHON_VER is 3.14 or higher"
else
  $PIP_INSTALL tensorflow evidential-deep-learning silence_tensorflow
fi
$PIP_INSTALL torch
$PIP_INSTALL yaplon
$PIP_INSTALL lxml
$PIP_INSTALL juliandate
$PIP_INSTALL pybufrkit
$PIP_INSTALL pyephem
# NOTE: basemap is deliberately NOT pip-installed here; per the CHANGELOG it
# moved to the conda (conda-forge) install list for Python 3.14 support.
$PIP_INSTALL redis
$PIP_INSTALL Flask
$PIP_INSTALL goes2go
$PIP_INSTALL nco
$PIP_INSTALL cdo
$PIP_INSTALL ecmwf-opendata
$PIP_INSTALL python-docx

# some packages require a Fortran compiler. This sometimes isn't available
# on macs (though usually is)
if [[ $FORTRAN_AVAILABLE == TRUE ]]
then
# Single, accurate status message (the old text wrongly implied Python 3.12
# or older; ffnet is installed for any Python when Fortran is available).
echo "We have a Fortran compiler. Installing ffnet"
# we need to install ffnet from https://github.com/mrkwjc/ffnet.git
# This is because the version in PyPI is not compatible with Python 3
# and latest scipy
Expand All @@ -682,8 +692,14 @@ then
# Python 3.13+ dropped distutils, so ffnet's legacy build needs setuptools
# and wheel present in the env before pip runs the build.
if [[ $PYTHON_VER_WITHOUT_DOT -ge 313 ]]
then
  $PIP_INSTALL setuptools wheel
fi
# For Python 3.13+, pip's isolated build environment does not inherit the
# conda env packages (e.g. numpy), which ffnet needs at build time. Passing
# --no-build-isolation tells pip to use the already-installed packages from
# the conda env instead of creating a fresh isolated sandbox.
# (The former '--no-use-pep517' flag was dropped in pip 23.1 and must not
# be used here.)
if [[ $PYTHON_VER_WITHOUT_DOT -ge 313 ]]
then
  EXTRA_PIP_FLAGS='--no-build-isolation'
else
  EXTRA_PIP_FLAGS=''
fi
Expand Down Expand Up @@ -755,8 +771,8 @@ $PIP_INSTALL prompt_toolkit
# Use mamba to output list of packages installed
# ----------------------------------------------
cd "$MINIFORGE_ENVDIR"
# NOTE: mamba 2.x does not support --show-channel-urls; passing it makes
# these listings fail with a warning, so the flag must not be used here.
"$MINIFORGE_BINDIR/mamba" list -n "$MINIFORGE_ENVNAME" --explicit > distribution_spec_file.txt
"$MINIFORGE_BINDIR/mamba" list -n "$MINIFORGE_ENVNAME" > mamba_list_packages.txt
./bin/pip freeze > pip_freeze_packages.txt

# Restore User's .mambarc and .condarc using cleanup function
Expand Down
13 changes: 11 additions & 2 deletions tests/torch_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,17 @@


dtype = torch.float

# Select the compute device: prefer the first CUDA GPU when available,
# otherwise fall back to the CPU. The GPU branch prints some diagnostics
# (device name, memory usage, device count) once at startup.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print(f"Using GPU: {torch.cuda.get_device_name(0)}")
    print(f"GPU memory allocated: {torch.cuda.memory_allocated(0) / 1024**2:.2f} MB")
    print(f"GPU memory cached: {torch.cuda.memory_reserved(0) / 1024**2:.2f} MB")
    print(f"CUDA device count: {torch.cuda.device_count()}")
    print(f"Current CUDA device: {torch.cuda.current_device()}")
else:
    device = torch.device("cpu")
    print("CUDA not available, using CPU")

# Create random input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
Expand Down
98 changes: 98 additions & 0 deletions tests/torch_example_like_tflow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
#!/usr/bin/env python3
"""Train a small MNIST classifier in PyTorch, mirroring the classic
TensorFlow/Keras beginner example (Flatten -> Dense(128, relu) ->
Dropout(0.2) -> Dense(10)).

Downloads MNIST into ./data on first run, trains for 5 epochs on GPU when
CUDA is available (CPU otherwise), reports test accuracy, and prints class
probabilities for the first 5 test samples.
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

print("PyTorch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load MNIST dataset (downloaded into ./data if not already present)
transform = transforms.Compose([
    transforms.ToTensor(),  # This automatically normalizes to [0, 1]
])

train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Define the model: a simple fully-connected classifier over the
# flattened 28x28 MNIST images, emitting raw logits for 10 classes.
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(28 * 28, 128),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(128, 10)
).to(device)

print(model)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()  # Combines softmax and negative log likelihood
optimizer = optim.Adam(model.parameters())

# Training loop
epochs = 5
for epoch in range(epochs):
    model.train()  # enable training mode (activates Dropout)
    running_loss = 0.0
    correct = 0
    total = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # Zero the gradients
        optimizer.zero_grad()

        # Forward pass
        output = model(data)
        loss = criterion(output, target)

        # Backward pass and optimize
        loss.backward()
        optimizer.step()

        # Statistics: accumulate batch loss and running train accuracy
        running_loss += loss.item()
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += (predicted == target).sum().item()

    accuracy = 100 * correct / total
    avg_loss = running_loss / len(train_loader)
    print(f'Epoch {epoch + 1}/{epochs} - Loss: {avg_loss:.4f}, Accuracy: {accuracy:.2f}%')

# Evaluation on the held-out test set (Dropout disabled, no gradients)
model.eval()
correct = 0
total = 0

with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += (predicted == target).sum().item()

test_accuracy = 100 * correct / total
print(f'\nTest Accuracy: {test_accuracy:.2f}%')

# Get probabilities for first 5 test samples (softmax over the raw logits,
# since the model itself outputs unnormalized scores)
model.eval()
with torch.no_grad():
    test_data, _ = next(iter(test_loader))
    test_data = test_data[:5].to(device)
    logits = model(test_data)
    probabilities = torch.softmax(logits, dim=1)
    print("\nProbabilities for first 5 test samples:")
    print(probabilities)
Loading