
Azure NetApp Files: Enterprise-Grade File Storage for Demanding Workloads

Azure NetApp Files is an enterprise-grade, high-performance file storage service powered by NetApp technology. It’s designed for the most demanding workloads, including SAP HANA, high-performance computing, and enterprise applications that require consistent sub-millisecond latency.

Understanding Azure NetApp Files Architecture

Azure NetApp Files uses a hierarchical structure:

  • NetApp Account: Top-level container that groups capacity pools and their policies
  • Capacity Pool: Provisioned storage with a service level (Standard, Premium, or Ultra) that sets the performance budget
  • Volume: The actual file share, carved from a pool and mounted by clients over NFS or SMB

Creating Azure NetApp Files Resources

# Register the NetApp resource provider
az provider register --namespace Microsoft.NetApp

# Create NetApp account
az netappfiles account create \
    --resource-group myResourceGroup \
    --location eastus \
    --account-name mynetappaccount

# Create capacity pool (Ultra tier for highest performance)
az netappfiles pool create \
    --resource-group myResourceGroup \
    --location eastus \
    --account-name mynetappaccount \
    --pool-name mypool \
    --size 4 \
    --service-level Ultra

# Create volume
az netappfiles volume create \
    --resource-group myResourceGroup \
    --location eastus \
    --account-name mynetappaccount \
    --pool-name mypool \
    --name myvolume \
    --file-path myvolumepath \
    --vnet myVNet \
    --subnet myANFSubnet \
    --protocol-types NFSv4.1 \
    --service-level Ultra \
    --usage-threshold 1024
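
Note that myANFSubnet referenced above must already be delegated to Azure NetApp Files, or the volume create will fail. A minimal sketch, assuming myVNet exists (the address range is illustrative):

# Create a subnet delegated to Microsoft.Netapp/volumes
az network vnet subnet create \
    --resource-group myResourceGroup \
    --vnet-name myVNet \
    --name myANFSubnet \
    --address-prefixes 10.0.1.0/28 \
    --delegations "Microsoft.Netapp/volumes"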

Using Terraform:

resource "azurerm_netapp_account" "main" {
  name                = "mynetappaccount"
  resource_group_name = azurerm_resource_group.main.name
  location            = azurerm_resource_group.main.location
}

resource "azurerm_netapp_pool" "main" {
  name                = "ultra-pool"
  resource_group_name = azurerm_resource_group.main.name
  location            = azurerm_resource_group.main.location
  account_name        = azurerm_netapp_account.main.name
  service_level       = "Ultra"
  size_in_tb          = 4
}

resource "azurerm_netapp_volume" "main" {
  name                = "high-perf-volume"
  resource_group_name = azurerm_resource_group.main.name
  location            = azurerm_resource_group.main.location
  account_name        = azurerm_netapp_account.main.name
  pool_name           = azurerm_netapp_pool.main.name
  volume_path         = "high-perf-volume"
  service_level       = "Ultra"
  subnet_id           = azurerm_subnet.anf.id
  protocols           = ["NFSv4.1"]
  storage_quota_in_gb = 1024

  export_policy_rule {
    rule_index        = 1
    allowed_clients   = ["10.0.0.0/16"]
    protocols_enabled = ["NFSv4.1"]
    unix_read_write   = true
  }
}
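
The azurerm_subnet.anf referenced above needs a delegated subnet that isn't shown; a minimal sketch, assuming an existing azurerm_virtual_network.main:

resource "azurerm_subnet" "anf" {
  name                 = "anf-subnet"
  resource_group_name  = azurerm_resource_group.main.name
  virtual_network_name = azurerm_virtual_network.main.name
  address_prefixes     = ["10.0.1.0/28"]

  # ANF volumes can only be placed in a subnet delegated to the service
  delegation {
    name = "netapp"

    service_delegation {
      name    = "Microsoft.Netapp/volumes"
      actions = [
        "Microsoft.Network/networkinterfaces/*",
        "Microsoft.Network/virtualNetworks/subnets/join/action",
      ]
    }
  }
}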

Service Level Performance

# Python - Understanding service level performance
"""
Azure NetApp Files Service Levels:

| Service Level | Throughput per TiB |
|--------------|-------------------|
| Standard     | 16 MiB/s          |
| Premium      | 64 MiB/s          |
| Ultra        | 128 MiB/s         |

Example calculations:
- 4 TiB Ultra volume = 4 * 128 = 512 MiB/s throughput
- 4 TiB Premium volume = 4 * 64 = 256 MiB/s throughput
"""

import math

class ANFPerformanceCalculator:
    SERVICE_LEVELS = {
        'Standard': 16,   # MiB/s per TiB
        'Premium': 64,
        'Ultra': 128
    }

    def calculate_throughput(self, service_level, size_tib):
        """Calculate maximum throughput for a volume"""
        if service_level not in self.SERVICE_LEVELS:
            raise ValueError(f"Invalid service level: {service_level}")

        throughput_mibs = self.SERVICE_LEVELS[service_level] * size_tib
        return {
            'service_level': service_level,
            'size_tib': size_tib,
            'max_throughput_mibs': throughput_mibs,
            'max_throughput_mbs': throughput_mibs * 1.048576  # Convert to MB/s
        }

    def recommend_configuration(self, required_throughput_mibs):
        """Recommend the smallest size per service level for a target throughput"""
        recommendations = []

        for level, throughput_per_tib in self.SERVICE_LEVELS.items():
            size_needed = required_throughput_mibs / throughput_per_tib
            size_needed = max(4, math.ceil(size_needed))  # Capacity pools start at 4 TiB

            recommendations.append({
                'service_level': level,
                'size_tib': size_needed,
                'actual_throughput': size_needed * throughput_per_tib
            })

        return sorted(recommendations, key=lambda x: x['size_tib'])
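
A quick sanity check of the numbers above (values are illustrative):

# Example usage
calc = ANFPerformanceCalculator()

print(calc.calculate_throughput('Ultra', 4))
# {'service_level': 'Ultra', 'size_tib': 4, 'max_throughput_mibs': 512, 'max_throughput_mbs': 536.870912}

# Smallest size per service level that sustains ~500 MiB/s
for option in calc.recommend_configuration(500):
    print(option)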

Mounting Azure NetApp Files Volumes

# Mount NFSv4.1 volume on Linux
sudo mkdir -p /mnt/anfvolume

# Get mount instructions from Azure portal or CLI
MOUNT_IP=$(az netappfiles volume show \
    --resource-group myResourceGroup \
    --account-name mynetappaccount \
    --pool-name mypool \
    --name myvolume \
    --query "mountTargets[0].ipAddress" -o tsv)

# Mount with the commonly recommended options for ANF
sudo mount -t nfs -o rw,hard,rsize=262144,wsize=262144,vers=4.1,tcp \
    $MOUNT_IP:/myvolumepath /mnt/anfvolume

# Add to fstab for persistence across reboots
echo "$MOUNT_IP:/myvolumepath /mnt/anfvolume nfs rw,hard,rsize=262144,wsize=262144,vers=4.1,tcp 0 0" | sudo tee -a /etc/fstab
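
A quick check that the volume is mounted with the expected options and capacity:

# Verify the mount and available space
df -h /mnt/anfvolume
mount | grep anfvolume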

Snapshot Management

// C# - Managing Azure NetApp Files snapshots
using Azure;
using Azure.Identity;
using Azure.ResourceManager;
using Azure.ResourceManager.NetApp;
using Azure.ResourceManager.NetApp.Models;

public class ANFSnapshotManager
{
    private readonly ArmClient _armClient;

    public ANFSnapshotManager(ArmClient armClient) => _armClient = armClient;

    // GetVolumeAsync / GetAccountAsync are small helpers that look up the
    // volume and account resources through _armClient (omitted for brevity).

    public async Task<SnapshotResource> CreateSnapshotAsync(
        string resourceGroup,
        string accountName,
        string poolName,
        string volumeName,
        string snapshotName)
    {
        var volume = await GetVolumeAsync(
            resourceGroup, accountName, poolName, volumeName);

        var snapshotData = new SnapshotData(volume.Data.Location);

        var snapshot = await volume.GetSnapshots()
            .CreateOrUpdateAsync(WaitUntil.Completed, snapshotName, snapshotData);

        return snapshot.Value;
    }

    public async Task<List<SnapshotResource>> ListSnapshotsAsync(
        string resourceGroup,
        string accountName,
        string poolName,
        string volumeName)
    {
        var volume = await GetVolumeAsync(
            resourceGroup, accountName, poolName, volumeName);

        var snapshots = new List<SnapshotResource>();

        await foreach (var snapshot in volume.GetSnapshots().GetAllAsync())
        {
            snapshots.Add(snapshot);
        }

        return snapshots;
    }

    public async Task<VolumeResource> RestoreFromSnapshotAsync(
        string resourceGroup,
        string accountName,
        string poolName,
        string volumeName,
        string snapshotId)
    {
        var volume = await GetVolumeAsync(
            resourceGroup, accountName, poolName, volumeName);

        var revertContent = new VolumeRevert { SnapshotId = snapshotId };

        await volume.RevertAsync(WaitUntil.Completed, revertContent);

        return volume;
    }

    public async Task ConfigureSnapshotPolicyAsync(
        string resourceGroup,
        string accountName,
        string policyName)
    {
        var account = await GetAccountAsync(resourceGroup, accountName);

        var policyData = new SnapshotPolicyData(account.Data.Location)
        {
            HourlySchedule = new HourlySchedule
            {
                SnapshotsToKeep = 24,
                Minute = 0
            },
            DailySchedule = new DailySchedule
            {
                SnapshotsToKeep = 7,
                Hour = 0,
                Minute = 0
            },
            WeeklySchedule = new WeeklySchedule
            {
                SnapshotsToKeep = 4,
                Day = "Sunday",
                Hour = 0,
                Minute = 0
            },
            IsEnabled = true
        };

        await account.GetSnapshotPolicies()
            .CreateOrUpdateAsync(WaitUntil.Completed, policyName, policyData);
    }
}
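
A minimal usage sketch (credential handling and resource names are placeholders):

// Authenticate with default Azure credentials and take a manual snapshot
var armClient = new ArmClient(new DefaultAzureCredential());
var manager = new ANFSnapshotManager(armClient);

var snapshot = await manager.CreateSnapshotAsync(
    "myResourceGroup", "mynetappaccount", "mypool",
    "myvolume", "manual-snap-001");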

Cross-Region Replication

# Create the data protection (destination) volume in the remote region
az netappfiles volume create \
    --resource-group myRemoteResourceGroup \
    --location westus2 \
    --account-name myremoteaccount \
    --pool-name myremotepool \
    --name myremotevolume \
    --file-path myremotevolumepath \
    --vnet myRemoteVNet \
    --subnet myRemoteANFSubnet \
    --protocol-types NFSv4.1 \
    --service-level Standard \
    --usage-threshold 1024 \
    --endpoint-type dst \
    --remote-volume-resource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.NetApp/netAppAccounts/mynetappaccount/capacityPools/mypool/volumes/myvolume \
    --replication-schedule hourly

# Authorize the replication from the source volume
az netappfiles volume replication approve \
    --resource-group myResourceGroup \
    --account-name mynetappaccount \
    --pool-name mypool \
    --name myvolume \
    --remote-volume-resource-id /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.NetApp/netAppAccounts/myremoteaccount/capacityPools/myremotepool/volumes/myremotevolume

# Check replication status (run against the destination volume)
az netappfiles volume replication status \
    --resource-group myRemoteResourceGroup \
    --account-name myremoteaccount \
    --pool-name myremotepool \
    --name myremotevolume
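
For a disaster recovery drill, break the relationship on the destination volume to make it writable, then resync when the drill is over. A sketch using the names above (both commands run against the destination):

# Break the replication (destination becomes read-write)
az netappfiles volume replication suspend \
    --resource-group myRemoteResourceGroup \
    --account-name myremoteaccount \
    --pool-name myremotepool \
    --name myremotevolume

# Resync from the source afterwards
az netappfiles volume replication resume \
    --resource-group myRemoteResourceGroup \
    --account-name myremoteaccount \
    --pool-name myremotepool \
    --name myremotevolume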

Monitoring and Performance

# Python - Monitor ANF metrics
from azure.mgmt.monitor import MonitorManagementClient
from azure.identity import DefaultAzureCredential
from datetime import datetime, timedelta

class ANFMonitor:
    def __init__(self, subscription_id):
        self.credential = DefaultAzureCredential()
        self.monitor_client = MonitorManagementClient(
            self.credential, subscription_id
        )

    def get_volume_metrics(self, volume_id, hours=24):
        """Get performance metrics for a volume"""
        end_time = datetime.utcnow()
        start_time = end_time - timedelta(hours=hours)

        metrics_data = self.monitor_client.metrics.list(
            volume_id,
            timespan=f"{start_time.isoformat()}/{end_time.isoformat()}",
            interval='PT1H',
            metricnames='VolumeLogicalSize,ReadIops,WriteIops,ReadThroughput,WriteThroughput',
            aggregation='Average'
        )

        results = {}
        for metric in metrics_data.value:
            values = []
            for timeseries in metric.timeseries:
                for data in timeseries.data:
                    if data.average is not None:
                        values.append({
                            'timestamp': data.time_stamp,
                            'value': data.average
                        })
            results[metric.name.value] = values

        return results

    def check_capacity_alerts(self, volume_id, quota_bytes, threshold_percent=80):
        """Check whether a volume is approaching its quota"""
        metrics = self.get_volume_metrics(volume_id, hours=1)

        logical_size = metrics.get('VolumeLogicalSize')
        if not logical_size:
            return None

        used_bytes = logical_size[-1]['value']
        used_percent = used_bytes / quota_bytes * 100
        return {
            'used_bytes': used_bytes,
            'used_percent': used_percent,
            'alert': used_percent >= threshold_percent
        }
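
A short usage sketch (the subscription ID and volume resource ID are placeholders):

# Example usage
monitor = ANFMonitor('<subscription-id>')
volume_id = (
    '/subscriptions/<sub>/resourceGroups/myResourceGroup'
    '/providers/Microsoft.NetApp/netAppAccounts/mynetappaccount'
    '/capacityPools/mypool/volumes/myvolume'
)

metrics = monitor.get_volume_metrics(volume_id, hours=6)
for point in metrics.get('ReadThroughput', [])[-3:]:
    print(point['timestamp'], point['value'])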

Best Practices

  1. Size pools appropriately: Throughput scales linearly with provisioned size (see the resize sketch after this list)
  2. Use a delegated subnet: ANF requires a subnet delegated to Microsoft.NetApp/volumes; a /28 is usually sufficient
  3. Plan for snapshots: Snapshots consume space from the volume quota
  4. Monitor performance: Track IOPS, throughput, and logical size against your limits
  5. Test disaster recovery: Break and resync replication regularly to validate failover
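
Because performance is a function of provisioned size, both pools and volumes can be resized online. A minimal sketch, assuming the resources created earlier:

# Grow the capacity pool from 4 TiB to 8 TiB (doubling its throughput budget)
az netappfiles pool update \
    --resource-group myResourceGroup \
    --account-name mynetappaccount \
    --pool-name mypool \
    --size 8

# Grow the volume quota to 2 TiB
az netappfiles volume update \
    --resource-group myResourceGroup \
    --account-name mynetappaccount \
    --pool-name mypool \
    --name myvolume \
    --usage-threshold 2048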

Azure NetApp Files delivers enterprise storage performance in the cloud, making it the ideal choice for mission-critical applications that demand consistent, low-latency access to shared file data.

Michael John Peña

Senior Data Engineer based in Sydney. Writing about data, cloud, and technology.