Filesystem Mounting Guide
This guide covers the commands and setup procedures for mounting network and distributed filesystems, including NFS, CIFS, SSHFS, GlusterFS, CephFS, and others, together with the corresponding /etc/fstab configuration.
Table of Contents
- NFS (Network File System)
- CIFS (Common Internet File System / SMB)
- SSHFS (SSH Filesystem)
- GlusterFS
- CephFS
- OCFS2 (Oracle Cluster File System)
- HDFS (Hadoop Distributed File System)
- S3FS (Amazon S3 Filesystem)
- Mount Options Explained
- General Mounting Commands
- Troubleshooting
NFS (Network File System)
Prerequisites
# Install NFS utilities
sudo apt-get update
sudo apt-get install nfs-common # Ubuntu/Debian (nfs-utils is the package name on RHEL/Fedora)
sudo yum install nfs-utils # CentOS/RHEL
sudo dnf install nfs-utils # Fedora
Manual Mount Commands
# Basic NFS mount
sudo mount -t nfs server_ip:/path/to/share /local/mount/point
# NFS with specific version
sudo mount -t nfs -o vers=4 server_ip:/path/to/share /local/mount/point
# NFS with additional options
sudo mount -t nfs -o vers=4,rsize=8192,wsize=8192,hard,intr server_ip:/path/to/share /local/mount/point
# Note: uid=/gid= are not valid NFS mount options (they are CIFS/FUSE options).
# NFS ownership follows the server-side UIDs/GIDs or NFSv4 idmapping; to force
# ownership, use export options such as all_squash,anonuid=1000,anongid=1000 on the server.
Advanced NFS Options with Benefits
# Performance tuning options
sudo mount -t nfs -o vers=4,rsize=32768,wsize=32768,timeo=14,intr server_ip:/share /mount/point
# Benefits: Larger read/write buffers improve throughput; intr once allowed interrupting hung operations (it is a no-op on Linux 2.6.25 and later, but harmless)
# Security options
sudo mount -t nfs -o vers=4,sec=krb5 server_ip:/share /mount/point
# Benefits: Kerberos authentication provides strong security for enterprise environments
# Read-only mount
sudo mount -t nfs -o vers=4,ro server_ip:/share /mount/point
# Benefits: Prevents accidental modifications, useful for shared read-only data
# Soft mount (fails after timeout)
sudo mount -t nfs -o vers=4,soft,timeo=10,retrans=3 server_ip:/share /mount/point
# Benefits: Prevents applications from hanging indefinitely when server is unavailable
/etc/fstab Configuration for NFS
# Basic NFS entry
server_ip:/path/to/share /local/mount/point nfs defaults 0 0
# NFS4 with performance options
server_ip:/path/to/share /local/mount/point nfs4 rsize=32768,wsize=32768,hard,intr,timeo=14 0 0
# NFS mounted on demand only (note: uid=/gid= are CIFS/FUSE options and are not valid for NFS)
server_ip:/path/to/share /local/mount/point nfs4 defaults,noauto 0 0
# NFS with security
server_ip:/path/to/share /local/mount/point nfs4 sec=krb5,rsize=8192,wsize=8192 0 0
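Before committing an entry to /etc/fstab, it can help to confirm that the server actually exports the path and that the entry mounts cleanly; a quick check, assuming the nfs-common utilities are installed and using the placeholder server_ip from above:
# List the exports offered by the server
showmount -e server_ip
# Mount everything in fstab and confirm the new mount point
sudo mount -a
findmnt /local/mount/point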
CIFS (Common Internet File System / SMB)
Prerequisites
# Install CIFS utilities
sudo apt-get install cifs-utils # Ubuntu/Debian
sudo yum install cifs-utils # CentOS/RHEL
sudo dnf install cifs-utils # Fedora
Manual Mount Commands with Benefits
# Basic CIFS mount
sudo mount -t cifs //server_ip/share /local/mount/point -o username=user,password=pass
# CIFS with credentials file (Security benefit: passwords not visible in process list)
sudo mount -t cifs //server_ip/share /local/mount/point -o credentials=/path/to/credentials
# CIFS with specific SMB version (Compatibility: ensures protocol version compatibility)
sudo mount -t cifs //server_ip/share /local/mount/point -o username=user,vers=3.0
# Performance and security options
sudo mount -t cifs //server/share /mount/point -o username=user,vers=3.0,cache=strict,rsize=1048576,wsize=1048576
# Benefits: cache=strict improves performance, larger buffer sizes increase throughput
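The credentials file referenced above is a plain key=value file that should be readable by root only; a minimal sketch (the account, password, and domain values are placeholders):
# /etc/cifs-credentials
username=myuser
password=mypassword
domain=MYDOMAIN
# Restrict permissions so regular users cannot read the password
sudo chmod 600 /etc/cifs-credentials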
/etc/fstab Configuration for CIFS
# CIFS with performance options
//server_ip/share /local/mount/point cifs credentials=/etc/cifs-credentials,vers=3.0,cache=strict,rsize=1048576,wsize=1048576 0 0
SSHFS (SSH Filesystem)
Prerequisites
# Install SSHFS
sudo apt-get install sshfs # Ubuntu/Debian
sudo yum install fuse-sshfs # CentOS/RHEL
sudo dnf install fuse-sshfs # Fedora
Manual Mount Commands with Benefits
# Performance optimization
sshfs -o cache=yes,kernel_cache,compression=yes,large_read user@server:/path /mount/point
# Benefits: Caching reduces network calls, compression saves bandwidth, large_read improves performance
# With reconnect capability
sshfs -o reconnect,ServerAliveInterval=15,ServerAliveCountMax=3 user@server:/path /mount/point
# Benefits: Automatically reconnects on network interruptions, maintains session stability
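SSHFS runs entirely over FUSE, so your own mounts can be unmounted without root; a short sketch of common follow-up commands (allow_other is optional and requires user_allow_other to be enabled in /etc/fuse.conf):
# Unmount an SSHFS mount owned by the current user
fusermount -u /mount/point
# Allow other local users to access the mount (needs user_allow_other in /etc/fuse.conf)
sshfs -o allow_other,reconnect user@server:/path /mount/point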
GlusterFS
Prerequisites
# Install GlusterFS client
sudo apt-get install glusterfs-client # Ubuntu/Debian
sudo yum install glusterfs-fuse # CentOS/RHEL
sudo dnf install glusterfs-fuse # Fedora
# Verify installation
gluster --version
GlusterFS Server Setup (Brief)
# On each GlusterFS server node
sudo systemctl start glusterd
sudo systemctl enable glusterd
# Create trusted storage pool (run on one node)
sudo gluster peer probe server2.example.com
sudo gluster peer probe server3.example.com
# Create volume
sudo gluster volume create gv0 replica 3 server1:/data/brick1 server2:/data/brick1 server3:/data/brick1
sudo gluster volume start gv0
Manual GlusterFS Mount Commands
# Basic GlusterFS mount
sudo mount -t glusterfs server1:/volume_name /local/mount/point
# GlusterFS with backup volfile servers (High Availability)
sudo mount -t glusterfs -o backup-volfile-servers=server2:server3 server1:/volume_name /mount/point
# Benefits: Automatic failover if primary server becomes unavailable
# GlusterFS with performance options
sudo mount -t glusterfs -o log-level=WARNING,log-file=/var/log/gluster.log server1:/volume /mount/point
# Benefits: Controlled logging reduces overhead, centralized log management
# GlusterFS with direct I/O (bypasses page cache)
sudo mount -t glusterfs -o direct-io-mode=enable server1:/volume /mount/point
# Benefits: Reduces memory usage, better for large file operations
# GlusterFS with attribute timeout optimization
sudo mount -t glusterfs -o attribute-timeout=600,entry-timeout=600 server1:/volume /mount/point
# Benefits: Reduces metadata operations, improves performance for stable directories
Advanced GlusterFS Volume and Mount Tuning
# Performance tuning for different workloads
# Note: performance.* translators and SSL are volume-level settings; they are
# configured once with `gluster volume set` on a server node, not passed as
# client mount options.
# For small files workload
sudo gluster volume set vol performance.read-ahead off
sudo gluster volume set vol performance.quick-read on
# Benefits: Optimized for small file operations
# For large files workload
sudo gluster volume set vol performance.read-ahead on
sudo gluster volume set vol performance.cache-size 256MB
# Benefits: Prefetching improves sequential read performance
# For write-heavy workload
sudo gluster volume set vol performance.write-behind on
sudo gluster volume set vol performance.flush-behind on
# Benefits: Write coalescing improves write performance
# Security with SSL/TLS (requires certificates in place on servers and clients)
sudo gluster volume set vol client.ssl on
sudo gluster volume set vol server.ssl on
# Benefits: Encrypted data transmission, secure authentication
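To confirm which translator options are in effect on a volume, recent GlusterFS releases provide gluster volume get; for example (the volume name is a placeholder):
# Show all effective options, or filter for the performance translators
sudo gluster volume get vol all | grep performance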
/etc/fstab Configuration for GlusterFS
# Basic GlusterFS entry
server1:/volume_name /mnt/glusterfs glusterfs defaults,_netdev 0 0
# GlusterFS with backup servers and performance options
server1:/volume_name /mnt/glusterfs glusterfs defaults,_netdev,backup-volfile-servers=server2:server3,log-level=WARNING 0 0
# GlusterFS optimized for metadata-heavy workloads (cache sizing is a volume option set server-side)
server1:/volume_name /mnt/glusterfs glusterfs defaults,_netdev,attribute-timeout=600,entry-timeout=600 0 0
# GlusterFS with SSL security (enabled per volume via client.ssl/server.ssl; no extra mount option required)
server1:/volume_name /mnt/glusterfs glusterfs defaults,_netdev,log-level=WARNING 0 0
GlusterFS Management Commands
# Check volume info
sudo gluster volume info
# Check volume status
sudo gluster volume status
# Check peer status
sudo gluster peer status
# Monitor volume performance
sudo gluster volume top volume_name read-perf
sudo gluster volume top volume_name write-perf
# Volume healing (for replicated volumes)
sudo gluster volume heal volume_name
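The heal command above triggers healing; to see what is still pending instead, the info variants can be used (info summary is available on newer releases):
# List files pending heal, or just the per-brick counts
sudo gluster volume heal volume_name info
sudo gluster volume heal volume_name info summary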
CephFS
Prerequisites
# Install Ceph client
sudo apt-get install ceph-fuse ceph-common # Ubuntu/Debian
sudo yum install ceph-fuse # CentOS/RHEL
sudo dnf install ceph-fuse # Fedora
Manual CephFS Mount Commands
# Basic CephFS mount using ceph-fuse
sudo ceph-fuse /mount/point
# CephFS with specific monitor servers
sudo ceph-fuse -m mon1:6789,mon2:6789,mon3:6789 /mount/point
# Benefits: Multiple monitors provide high availability
# CephFS with kernel mount
sudo mount -t ceph mon1:6789,mon2:6789:/ /mount/point -o name=admin,secret=AQD...
# Benefits: Kernel mount typically offers better performance than FUSE
# CephFS with specific user and keyring
sudo ceph-fuse /mount/point --name client.admin --keyring /etc/ceph/ceph.client.admin.keyring
# Benefits: Proper authentication and authorization
# CephFS with performance options
sudo mount -t ceph mon1:6789:/ /mount/point -o name=admin,secret=AQD...,cache,rsize=16777216
# Benefits: Client-side caching and larger read buffers improve performance
/etc/fstab Configuration for CephFS
# CephFS kernel mount
mon1:6789,mon2:6789,mon3:6789:/ /mnt/cephfs ceph name=admin,secret=AQD...,_netdev,noatime 0 0
# CephFS FUSE mount
none /mnt/cephfs fuse.ceph ceph.name=admin,ceph.keyring=/etc/ceph/ceph.client.admin.keyring,_netdev 0 0
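Embedding the raw secret in /etc/fstab makes it visible to every local user; the kernel client also accepts a secretfile= option pointing at a root-only file that contains just the key, for example:
# /etc/ceph/admin.secret holds only the base64 key, mode 600
mon1:6789,mon2:6789,mon3:6789:/ /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime 0 0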
OCFS2 (Oracle Cluster File System)
Prerequisites
# Install OCFS2 tools
sudo apt-get install ocfs2-tools # Ubuntu/Debian
sudo yum install ocfs2-tools # CentOS/RHEL
# Configure cluster
sudo vi /etc/ocfs2/cluster.conf
OCFS2 Configuration Example
# /etc/ocfs2/cluster.conf
node:
    ip_port = 7777
    ip_address = 192.168.1.10
    number = 1
    name = node1
    cluster = ocfs2

node:
    ip_port = 7777
    ip_address = 192.168.1.11
    number = 2
    name = node2
    cluster = ocfs2

cluster:
    node_count = 2
    name = ocfs2
Manual OCFS2 Mount Commands
# Basic OCFS2 mount
sudo mount -t ocfs2 /dev/shared_disk /mount/point
# OCFS2 with datavolume and cluster options
sudo mount -t ocfs2 -o datavolume,nointr /dev/shared_disk /mount/point
# Benefits: datavolume optimizes for database workloads, nointr prevents interruption
/etc/fstab Configuration for OCFS2
# OCFS2 entry
/dev/shared_disk /mnt/ocfs2 ocfs2 _netdev,datavolume,nointr 0 0
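The shared disk has to be formatted as OCFS2 once, from a single node, before any node can mount it; a minimal sketch matching the 2-node cluster.conf above (label and device are placeholders, and the service names can differ slightly between distributions):
# Format with 2 node slots and a label, then bring up the cluster stack
sudo mkfs.ocfs2 -L ocfs2vol -N 2 /dev/shared_disk
sudo systemctl enable --now o2cb ocfs2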
HDFS (Hadoop Distributed File System)
Prerequisites
# Install Hadoop FUSE connector
wget https://archive.apache.org/dist/hadoop/common/hadoop-2.10.1/hadoop-2.10.1.tar.gz
tar -xzf hadoop-2.10.1.tar.gz
sudo mv hadoop-2.10.1 /opt/hadoop
# Set environment
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Manual HDFS Mount Commands
# Mount HDFS using hadoop-fuse-dfs
sudo $HADOOP_HOME/bin/hadoop-fuse-dfs hdfs://namenode:9000 /mount/point
# Benefits: Access HDFS like a regular filesystem
# HDFS with specific user
sudo -u hdfs $HADOOP_HOME/bin/hadoop-fuse-dfs hdfs://namenode:9000 /mount/point -o allow_other
# Benefits: Proper permissions and multi-user access
/etc/fstab Configuration for HDFS
# HDFS entry (requires custom script)
hdfs://namenode:9000 /mnt/hdfs fuse.hadoop-fuse-dfs defaults,_netdev,user,noauto 0 0
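Before relying on the FUSE mount, it is worth confirming that this host can reach the NameNode with the ordinary HDFS CLI; assuming the environment variables from the prerequisites:
# List the HDFS root through the same NameNode URI used for the mount
$HADOOP_HOME/bin/hdfs dfs -ls hdfs://namenode:9000/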
S3FS (Amazon S3 Filesystem)
Prerequisites
# Install S3FS
sudo apt-get install s3fs # Ubuntu/Debian
sudo yum install s3fs-fuse # CentOS/RHEL
sudo dnf install s3fs-fuse # Fedora
# Configure credentials
echo "ACCESS_KEY:SECRET_KEY" > ~/.passwd-s3fs
chmod 600 ~/.passwd-s3fs
Manual S3FS Mount Commands
# Basic S3 mount
s3fs bucket_name /mount/point -o passwd_file=~/.passwd-s3fs
# S3 with specific region and performance options
s3fs bucket_name /mount/point -o passwd_file=~/.passwd-s3fs,url=https://s3.region.amazonaws.com,use_cache=/tmp,parallel_count=10
# Benefits: Regional endpoint reduces latency, caching improves performance, parallel uploads increase throughput
# S3 with multipart upload for large files
s3fs bucket_name /mount/point -o passwd_file=~/.passwd-s3fs,multipart_size=64,parallel_count=10
# Benefits: Efficient handling of large files, improved upload performance
/etc/fstab Configuration for S3FS
# S3FS entry
bucket_name /mnt/s3fs fuse.s3fs _netdev,passwd_file=/home/user/.passwd-s3fs,url=https://s3.region.amazonaws.com,use_cache=/tmp 0 0
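When an S3FS mount misbehaves, running it once in the foreground with debug output usually shows the failing S3 request; a sketch using s3fs's own debug options:
# Foreground mount with verbose s3fs and libcurl logging (Ctrl+C to stop)
s3fs bucket_name /mount/point -o passwd_file=~/.passwd-s3fs -f -o dbglevel=info -o curldbg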
Mount Options Explained
Why Use Specific Mount Options?
Network and Reliability Options
| Option | Purpose | Benefits | When to Use |
|---|---|---|---|
| _netdev | Wait for network before mounting | Prevents boot failures | All network filesystems in fstab |
| noauto | Don't mount automatically | Manual control, testing | Testing new mounts, conditional mounts |
| retry=n | Number of mount retry attempts | Handles temporary network issues | Unreliable networks |
| soft | Fail after timeout | Prevents application hangs | Non-critical mounts |
| hard | Retry indefinitely | Ensures data integrity | Critical data mounts |
| intr | Allow interruption of operations | User can break hung operations (no-op on Linux 2.6.25 and later) | Interactive environments |
Performance Options
| Option | Purpose | Benefits | When to Use |
|---|---|---|---|
| rsize=n | Read buffer size | Improves read throughput | Large file operations |
| wsize=n | Write buffer size | Improves write throughput | Heavy write workloads |
| cache=mode | Caching behavior | Reduces network overhead | Stable file content |
| compression | Enable compression | Saves bandwidth | Slow networks, compressible data |
| async | Asynchronous I/O | Better performance | When data loss risk is acceptable |
| noatime | Don't update access times | Reduces write operations | Read-heavy workloads |
Security Options
| Option | Purpose | Benefits | When to Use |
|---|---|---|---|
| sec=mode | Security method | Strong authentication | Enterprise environments |
| credentials=file | External credential file | Secure password storage | Production systems |
| uid/gid=n | Force user/group ID (CIFS and FUSE filesystems) | Consistent permissions | Multi-user environments |
| umask=n | Default permission mask | Security control | Shared filesystems |
| ro | Read-only mount | Prevents modifications | Archive/backup data |
Protocol-Specific Options
NFS Options
# vers=4 - Use NFSv4 (security, performance improvements)
# proto=tcp - Force TCP transport (reliability over UDP)
# port=2049 - Specific port (firewall configurations)
# timeo=600 - Timeout in deciseconds (network reliability)
# retrans=2 - Number of retransmissions (error recovery)
CIFS Options
# vers=3.0 - SMB protocol version (compatibility, security)
# domain=name - Windows domain (authentication)
# seal - SMB3 encryption (data protection)
# cache=strict - Caching mode (performance vs consistency)
# nobrl - Disable byte-range locking (compatibility)
GlusterFS Options
# backup-volfile-servers - Redundant servers (high availability)
# log-level=WARNING - Logging verbosity (performance)
# attribute-timeout=600 - Metadata caching (performance)
# direct-io-mode=enable - Bypass page cache (memory efficiency)
# client.ssl / server.ssl (volume options) - Encrypted transport (security)
Option Selection Guidelines
For Production Environments
# High availability
_netdev,backup-volfile-servers=server2:server3
# Performance optimization
rsize=32768,wsize=32768,cache=strict
# Security
sec=krb5 (NFS); for GlusterFS, enable client.ssl/server.ssl at the volume level
# Reliability
hard,intr,timeo=600,retrans=2
For Development/Testing
# Flexibility
noauto,user,rw
# Debugging
log-level=DEBUG,debug
# Quick setup
soft,timeo=100
For Different Workloads
Database Workloads:
# OCFS2 example
datavolume,nointr # Optimized for database I/O patterns
Big Data/Analytics:
# Large files, sequential access
rsize=1048576,wsize=1048576,performance.read-ahead=on
Small Files/Web Content:
# Many small files, random access
performance.quick-read=on,attribute-timeout=3600
Backup/Archive:
# Read-mostly, space-efficient
ro,noatime # add compression=yes where the filesystem supports it (e.g. SSHFS)
General Mounting Commands
Create Mount Points
# Create mount directories
sudo mkdir -p /mnt/{nfs,cifs,sshfs,glusterfs,cephfs,ocfs2,hdfs,s3fs}
# Set appropriate permissions
sudo chown $USER:$USER /mnt/sshfs
sudo chown $USER:$USER /mnt/s3fs
Mount Management
# Mount all entries in fstab
sudo mount -a
# Mount specific filesystem type
sudo mount -t glusterfs -a
# Show mounted filesystems by type
mount | grep glusterfs
mount | grep ceph
findmnt -t glusterfs,ceph,nfs4
# Performance monitoring
iostat -x 1
iotop -o
Advanced fstab Example
# /etc/fstab - Advanced multi-filesystem configuration
# <file system> <mount point> <type> <options> <dump> <pass>
# Local filesystems
UUID=xxx-xxx-xxx / ext4 defaults,noatime 0 1
UUID=yyy-yyy-yyy /boot ext4 defaults 0 2
# NFS mounts - High performance setup
192.168.1.100:/srv/nfs/data /mnt/nfs_data nfs4 defaults,_netdev,rsize=32768,wsize=32768,hard,intr 0 0
192.168.1.100:/srv/nfs/backup /mnt/nfs_backup nfs4 defaults,_netdev,ro,noauto 0 0
# CIFS mounts - Windows integration
//192.168.1.200/shared /mnt/cifs_shared cifs credentials=/etc/cifs-credentials,uid=1000,gid=1000,_netdev,vers=3.0,cache=strict 0 0
# SSHFS mounts - Secure remote access
user@192.168.1.50:/data /mnt/sshfs_data fuse.sshfs defaults,_netdev,IdentityFile=/home/user/.ssh/id_rsa,reconnect,compression=yes 0 0
# GlusterFS - Distributed storage cluster
gluster1:/gv0 /mnt/glusterfs glusterfs defaults,_netdev,backup-volfile-servers=gluster2:gluster3,log-level=WARNING,attribute-timeout=600 0 0
# CephFS - Ceph distributed filesystem
192.168.1.10:6789,192.168.1.11:6789:/ /mnt/cephfs ceph name=admin,secret=AQD...,_netdev,noatime 0 0
# S3FS - Cloud storage
my-bucket /mnt/s3fs fuse.s3fs _netdev,passwd_file=/home/user/.passwd-s3fs,url=https://s3.us-west-2.amazonaws.com,use_cache=/tmp 0 0
# OCFS2 - Oracle cluster filesystem (requires shared storage)
/dev/mapper/shared-disk /mnt/ocfs2 ocfs2 _netdev,datavolume,nointr 0 0
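After editing /etc/fstab it is safer to validate the file before the next reboot; recent util-linux versions ship findmnt --verify for a syntax and option sanity check, and mount -a exercises the entries immediately:
# Check fstab for obvious problems, then try mounting everything
sudo findmnt --verify
sudo mount -a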
Troubleshooting
GlusterFS Troubleshooting
# Check GlusterFS services
sudo systemctl status glusterd
sudo gluster peer status
sudo gluster volume status
# GlusterFS logs
sudo tail -f /var/log/glusterfs/mnt-glusterfs.log
sudo gluster volume log locate
# Test GlusterFS connectivity
sudo gluster volume info
telnet gluster-server 24007
CephFS Troubleshooting
# Check Ceph cluster health
sudo ceph -s
sudo ceph fs ls
sudo ceph mds stat
# CephFS logs
sudo tail -f /var/log/ceph/ceph-fuse.log
dmesg | grep ceph
# Test Ceph connectivity
ceph auth list
ceph osd tree
Performance Monitoring
# Filesystem-specific monitoring
# GlusterFS
sudo gluster volume top volume_name read-perf
sudo gluster volume profile volume_name start
sudo gluster volume profile volume_name info
# General filesystem performance
iostat -x 1
iotop -o
nfsstat -c # for NFS
smbstatus # for CIFS
# Network performance
iftop
nethogs
ss -tuln | grep :2049 # NFS
ss -tuln | grep :24007 # GlusterFS
Common Issues and Solutions
Mount Point Busy
# Find processes using mount point
sudo fuser -v /mount/point
sudo lsof +D /mount/point
# Force unmount
sudo fuser -k /mount/point
sudo umount -l /mount/point # lazy unmount
Permission Issues
# Check mount options
cat /proc/mounts | grep /mount/point
# Fix ownership
sudo chown -R user:group /mount/point
# Check filesystem permissions
getfacl /mount/point # if ACLs are used
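Stale file handles (common after an NFS export is recreated or a Gluster brick is replaced) usually clear after a lazy unmount and remount; a short sketch:
# Recover from "Stale file handle" errors
sudo umount -l /mount/point
sudo mount /mount/point # re-reads the fstab entry for this mount point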
This guide covers GlusterFS, CephFS, OCFS2, HDFS, and S3FS alongside the more common network filesystems, with explanations of why specific mount options are used and how they benefit different use cases.
Advanced Filesystem Mounting Guide
Table of Contents
- Architecture Overview
- Advanced NFS with Stunnel
- High-Performance GlusterFS
- Enterprise CIFS/SMB
- Distributed Storage Systems
- Performance Analysis & Tuning
- Security Hardening
- Monitoring & Alerting
- Disaster Recovery
- Production Use Cases
- Troubleshooting Deep Dive
Architecture Overview
Filesystem Stack Understanding
Application Layer
↓
VFS (Virtual File System)
↓
Filesystem Implementation (ext4, xfs, btrfs)
↓
Block Layer
↓
Storage Device Driver
↓
Physical Storage
Network Filesystem Architecture
Client Application
↓
VFS Layer
↓
Network Filesystem Client (NFS, CIFS, GlusterFS)
↓
Network Stack (TCP/UDP)
↓
Network Interface
↓
[Network Infrastructure]
↓
Server Network Interface
↓
Network Filesystem Server
↓
Local Filesystem
↓
Storage
Advanced NFS with Stunnel
Why Use Stunnel with NFS?
NFS traditionally lacks encryption for data in transit. Stunnel provides an SSL/TLS encryption tunnel for NFS traffic, which is essential for:
- Compliance requirements (PCI-DSS, HIPAA)
- Secure transmission over untrusted networks
- Protection against network sniffing attacks
Complete Stunnel + NFS Implementation
Server-Side Configuration
# Install stunnel
sudo apt-get update
sudo apt-get install stunnel4 nfs-kernel-server
# Enable stunnel
echo "ENABLED=1" | sudo tee /etc/default/stunnel4
# Generate SSL certificates for production
sudo mkdir -p /etc/stunnel/certs
sudo openssl req -new -x509 -days 3650 -nodes \
-out /etc/stunnel/certs/nfs-server.crt \
-keyout /etc/stunnel/certs/nfs-server.key \
-subj "/C=US/ST=State/L=City/O=Organization/OU=IT/CN=nfs-server.example.com"
# Combine certificate and key (run the redirection as root so it can write into /etc/stunnel/certs)
sudo sh -c 'cat /etc/stunnel/certs/nfs-server.crt /etc/stunnel/certs/nfs-server.key > /etc/stunnel/certs/nfs-server.pem'
sudo chmod 600 /etc/stunnel/certs/nfs-server.pem
sudo chown stunnel4:stunnel4 /etc/stunnel/certs/nfs-server.pem
Server Stunnel Configuration (/etc/stunnel/nfs-server.conf)
# Global options
cert = /etc/stunnel/certs/nfs-server.pem
pid = /var/run/stunnel4/nfs-server.pid
debug = 4
output = /var/log/stunnel4/nfs-server.log
# Security options
options = NO_SSLv2
options = NO_SSLv3
options = CIPHER_SERVER_PREFERENCE
ciphers = ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384
fips = no
# Connection options
socket = l:TCP_NODELAY=1
socket = r:TCP_NODELAY=1
# NFS services
[nfs]
accept = 20049
connect = 127.0.0.1:2049
cert = /etc/stunnel/certs/nfs-server.pem
[portmapper]
accept = 1111
connect = 127.0.0.1:111
[mountd]
accept = 10050
connect = 127.0.0.1:20048
[nlockmgr]
accept = 40001
connect = 127.0.0.1:4001
[status]
accept = 10051
connect = 127.0.0.1:1110
NFS Server Configuration
# Configure NFS exports
sudo tee /etc/exports << 'EOF'
/srv/nfs/data 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash,fsid=0)
/srv/nfs/backup 192.168.1.0/24(ro,sync,no_subtree_check,root_squash)
/srv/nfs/home 192.168.1.0/24(rw,sync,no_subtree_check,all_squash,anonuid=1000,anongid=1000)
EOF
# Lock NFS to specific ports for stunnel
sudo tee -a /etc/default/nfs-kernel-server << 'EOF'
RPCNFSDOPTS="-p 2049"
RPCMOUNTDOPTS="-p 20048"
STATDOPTS="--port 1110 --outgoing-port 1111"
LOCKD_TCPPORT=4001
LOCKD_UDPPORT=4001
EOF
# Configure RPC services
sudo tee /etc/modprobe.d/lockd.conf << 'EOF'
options lockd nlm_tcpport=4001 nlm_udpport=4001
EOF
# Create NFS directories
sudo mkdir -p /srv/nfs/{data,backup,home}
sudo chmod 755 /srv/nfs
sudo chown -R nobody:nogroup /srv/nfs
# Start services
sudo systemctl restart nfs-kernel-server
sudo systemctl restart stunnel4
sudo systemctl enable nfs-kernel-server stunnel4
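At this point the TLS listeners and the NFS exports can be verified locally on the server; a quick check, assuming the port numbers from the stunnel configuration above:
# Confirm stunnel is listening on the wrapped ports and the exports are active
sudo ss -tlnp | grep -E '20049|10050|1111'
sudo exportfs -v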
Client-Side Configuration
# Install stunnel and NFS client
sudo apt-get install stunnel4 nfs-common
# Enable stunnel
echo "ENABLED=1" | sudo tee /etc/default/stunnel4
# Copy the server certificate (create the local certificate directory first)
sudo mkdir -p /etc/stunnel/certs
sudo scp nfs-server:/etc/stunnel/certs/nfs-server.crt /etc/stunnel/certs/
# Client stunnel configuration
sudo tee /etc/stunnel/nfs-client.conf << 'EOF'
# Global options
client = yes
pid = /var/run/stunnel4/nfs-client.pid
debug = 4
output = /var/log/stunnel4/nfs-client.log
# Security options
verify = 2
CAfile = /etc/stunnel/certs/nfs-server.crt
options = NO_SSLv2
options = NO_SSLv3
# Connection options
socket = l:TCP_NODELAY=1
socket = r:TCP_NODELAY=1
# NFS services - connect to server
[nfs]
accept = 127.0.0.1:2049
connect = 192.168.1.100:20049
[portmapper]
accept = 127.0.0.1:111
connect = 192.168.1.100:1111
[mountd]
accept = 127.0.0.1:20048
connect = 192.168.1.100:10050
[nlockmgr]
accept = 127.0.0.1:4001
connect = 192.168.1.100:40001
[status]
accept = 127.0.0.1:1110
connect = 192.168.1.100:10051
EOF
# Start stunnel
sudo systemctl restart stunnel4
sudo systemctl enable stunnel4
Client Mount Configuration
# Create mount points
sudo mkdir -p /mnt/secure-nfs/{data,backup,home}
# Test manual mount through stunnel
sudo mount -t nfs -o vers=4,proto=tcp,port=2049 127.0.0.1:/ /mnt/secure-nfs/data
# fstab configuration for encrypted NFS
sudo tee -a /etc/fstab << 'EOF'
# Secure NFS mounts through stunnel
127.0.0.1:/data /mnt/secure-nfs/data nfs4 defaults,_netdev,proto=tcp,port=2049,hard,intr,timeo=600,retrans=2 0 0
127.0.0.1:/backup /mnt/secure-nfs/backup nfs4 defaults,_netdev,proto=tcp,port=2049,ro,hard,intr 0 0
127.0.0.1:/home /mnt/secure-nfs/home nfs4 defaults,_netdev,proto=tcp,port=2049,hard,intr,timeo=600 0 0
EOF
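To confirm that traffic really goes through TLS rather than plain NFS, the stunnel endpoint can be probed directly before mounting; a quick check from the client (server IP as in the configuration above):
# Should complete a TLS handshake and print the server certificate
openssl s_client -connect 192.168.1.100:20049 </dev/null | head -n 20
# Then mount everything and verify
sudo mount -a
df -h /mnt/secure-nfs/data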
Advanced Stunnel Configuration for Production
Performance Tuning
# /etc/stunnel/nfs-server.conf - Production optimizations
# (global options belong at the top of the file, before any [service] section;
# stunnel has no [global] section name)
# Performance options
socket = l:SO_REUSEADDR=1
socket = r:SO_REUSEADDR=1
socket = l:SO_KEEPALIVE=1
socket = r:SO_KEEPALIVE=1
socket = l:TCP_NODELAY=1
socket = r:TCP_NODELAY=1
# TLS session caching (reduces handshake overhead on reconnects)
session = 3600
# Compression (CPU vs network bandwidth trade-off)
compression = deflate
# Client certificate verification (mutual TLS)
verify = 2
CAfile = /etc/stunnel/certs/ca-bundle.crt
CRLfile = /etc/stunnel/certs/crl.pem
High Availability Stunnel
# Create stunnel cluster with keepalived
sudo tee /etc/keepalived/keepalived.conf << 'EOF'
vrrp_script chk_stunnel {
script "/bin/pgrep stunnel4"
interval 2
weight 2
fall 3
rise 2
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass mypassword
}
virtual_ipaddress {
192.168.1.200/24
}
track_script {
chk_stunnel
}
}
EOF
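Keepalived has to be installed and started on both stunnel nodes before the virtual IP comes up; a quick sketch of the remaining steps (interface name and VIP as in the configuration above):
# Install and start keepalived, then confirm the VIP is bound on the MASTER
sudo apt-get install keepalived
sudo systemctl enable --now keepalived
ip addr show eth0 | grep 192.168.1.200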
High-Performance GlusterFS
Enterprise GlusterFS Architecture
Three-Node Replica Configuration
# On all nodes - install GlusterFS
sudo apt-get update
sudo apt-get install glusterfs-server
# Start and enable glusterd
sudo systemctl start glusterd
sudo systemctl enable glusterd
# Create trusted storage pool (run on node1)
sudo gluster peer probe gluster-node2.example.com
sudo gluster peer probe gluster-node3.example.com
# Verify peer status
sudo gluster peer status
Advanced Volume Creation
# Create brick directories with proper alignment
sudo mkdir -p /data/glusterfs/brick1
sudo chmod 755 /data/glusterfs/brick1
# Create replica volume with advanced options
sudo gluster volume create gv-prod \
replica 3 \
transport tcp \
gluster-node1.example.com:/data/glusterfs/brick1 \
gluster-node2.example.com:/data/glusterfs/brick1 \
gluster-node3.example.com:/data/glusterfs/brick1
# Configure volume for production workloads
sudo gluster volume set gv-prod performance.cache-size 256MB
sudo gluster volume set gv-prod performance.io-thread-count 16
sudo gluster volume set gv-prod performance.read-ahead on
sudo gluster volume set gv-prod performance.readdir-ahead on
sudo gluster volume set gv-prod network.ping-timeout 10
sudo gluster volume set gv-prod cluster.lookup-optimize on
sudo gluster volume set gv-prod cluster.readdir-optimize on
# Security settings
sudo gluster volume set gv-prod auth.allow "192.168.1.*"
sudo gluster volume set gv-prod server.ssl on
sudo gluster volume set gv-prod client.ssl on
# Start volume
sudo gluster volume start gv-prod
Production Client Configuration
# Install GlusterFS client
sudo apt-get install glusterfs-client
# Create mount point
sudo mkdir -p /mnt/glusterfs-prod
# Advanced mount with all optimizations
sudo mount -t glusterfs \
-o backup-volfile-servers=gluster-node2.example.com:gluster-node3.example.com,log-level=WARNING,log-file=/var/log/gluster-client.log,cache-size=256MB,read-subvolume-index=1,attribute-timeout=600,entry-timeout=600,negative-timeout=60,gid-timeout=300,background-qlen=64,congestion-threshold=12,write-behind-window-size=4MB,read-ahead-page-count=16 \
gluster-node1.example.com:/gv-prod /mnt/glusterfs-prod
GlusterFS Performance Monitoring
# Volume profiling
sudo gluster volume profile gv-prod start
sudo gluster volume profile gv-prod info
# Top operations monitoring
sudo gluster volume top gv-prod read-perf
sudo gluster volume top gv-prod write-perf
sudo gluster volume top gv-prod open
sudo gluster volume top gv-prod read
sudo gluster volume top gv-prod write
# Monitoring script for system engineers
sudo tee /usr/local/bin/gluster-monitor.sh << 'EOF'
#!/bin/bash
VOLUME="gv-prod"
LOG_FILE="/var/log/gluster-monitoring.log"
echo "$(date): Gluster Volume Status Check" >> $LOG_FILE
# Check volume status
if ! gluster volume status $VOLUME | grep -q "Started"; then
echo "$(date): ERROR - Volume $VOLUME is not started" >> $LOG_FILE
# Send alert (integrate with your monitoring system)
exit 1
fi
# Check heal status
HEAL_PENDING=$(gluster volume heal $VOLUME info | grep "Number of entries" | awk '{sum += $4} END {print sum}')
if [ "$HEAL_PENDING" -gt 0 ]; then
echo "$(date): WARNING - $HEAL_PENDING files pending heal" >> $LOG_FILE
fi
# Check disk usage
gluster volume status $VOLUME detail | grep -A1 "Disk Space" >> $LOG_FILE
echo "$(date): Gluster monitoring completed" >> $LOG_FILE
EOF
sudo chmod +x /usr/local/bin/gluster-monitor.sh
# Add to root's crontab for regular monitoring (append to any existing entries)
( sudo crontab -l 2>/dev/null; echo "*/5 * * * * /usr/local/bin/gluster-monitor.sh" ) | sudo crontab -
GlusterFS fstab Configuration
# Production fstab entry
gluster-node1.example.com:/gv-prod /mnt/glusterfs-prod glusterfs defaults,_netdev,backup-volfile-servers=gluster-node2.example.com:gluster-node3.example.com,log-level=WARNING,attribute-timeout=600,entry-timeout=600 0 0
Enterprise CIFS/SMB/AD Integration
Samba with Active Directory Integration
Server Configuration
# Install Samba with AD support
sudo apt-get install samba smbclient winbind libnss-winbind libpam-winbind krb5-config
# Configure Kerberos
sudo tee /etc/krb5.conf << 'EOF'
[libdefaults]
default_realm = EXAMPLE.COM
kdc_timesync = 1
ccache_type = 4
forwardable = true
proxiable = true
fcc-mit-ticketflags = true
[realms]
EXAMPLE.COM = {
kdc = dc1.example.com
kdc = dc2.example.com
admin_server = dc1.example.com
default_domain = example.com
}
[domain_realm]
.example.com = EXAMPLE.COM
example.com = EXAMPLE.COM
EOF
Advanced Samba Configuration
# /etc/samba/smb.conf
sudo tee /etc/samba/smb.conf << 'EOF'
[global]
# Active Directory Integration
workgroup = EXAMPLE
realm = EXAMPLE.COM
security = ads
encrypt passwords = yes
# Authentication
password server = dc1.example.com dc2.example.com
winbind use default domain = yes
winbind offline logon = yes
winbind cache time = 300
winbind enum users = yes
winbind enum groups = yes
# Performance optimizations
socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=131072 SO_SNDBUF=131072
read raw = yes
write raw = yes
max xmit = 65535
dead time = 15
getwd cache = yes
# Security
client signing = mandatory
server signing = mandatory
smb encrypt = required
# Logging
log file = /var/log/samba/log.%m
log level = 2
max log size = 1000
# VFS modules for advanced features
vfs objects = acl_xattr catia fruit streams_xattr
[data]
path = /srv/samba/data
browseable = yes
writable = yes
valid users = @"EXAMPLE\Domain Users"
admin users = @"EXAMPLE\Domain Admins"
force group = "domain users"
create mask = 0664
directory mask = 0775
# Advanced features
vfs objects = full_audit recycle
full_audit:prefix = %u|%I|%S
full_audit:success = mkdir rmdir read pread write pwrite rename unlink
full_audit:failure = all
full_audit:facility = local5
full_audit:priority = notice
recycle:repository = .recycle
recycle:keeptree = yes
recycle:maxsize = 0
recycle:exclude = *.tmp,*.log,~*
EOF
# Join domain
sudo net ads join -U administrator
sudo systemctl restart smbd nmbd winbind
sudo systemctl enable smbd nmbd winbind
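A quick way to confirm that the domain join and winbind lookups work before exposing the shares (these commands come from the samba/winbind packages installed above):
# Verify the machine account and enumerate domain users/groups through winbind
sudo net ads testjoin
wbinfo -u | head
wbinfo -g | head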
Client-Side CIFS with AD Authentication
# Install required packages
sudo apt-get install cifs-utils krb5-user
# Create credentials file with Kerberos support
sudo tee /etc/cifs-ad-credentials << 'EOF'
username=serviceaccount
password=P@ssw0rd123
domain=EXAMPLE.COM
EOF
sudo chmod 600 /etc/cifs-ad-credentials
# Advanced CIFS mount with AD integration
sudo mount -t cifs //fileserver.example.com/data /mnt/cifs-ad \
-o credentials=/etc/cifs-ad-credentials,domain=EXAMPLE.COM,vers=3.0,sec=krb5i,cache=strict,rsize=1048576,wsize=1048576,echo_interval=60,iocharset=utf8,file_mode=0644,dir_mode=0755
# fstab configuration for AD-integrated CIFS
echo "//fileserver.example.com/data /mnt/cifs-ad cifs credentials=/etc/cifs-ad-credentials,domain=EXAMPLE.COM,vers=3.0,sec=krb5i,cache=strict,rsize=1048576,wsize=1048576,_netdev,uid=1000,gid=1000 0 0" | sudo tee -a /etc/fstab
Distributed Storage Systems
CephFS Production Deployment
Ceph Cluster Setup (3-node example)
# On all nodes - install Ceph
curl -fsSL https://download.ceph.com/keys/release.asc | sudo apt-key add -
echo "deb https://download.ceph.com/debian-quincy/ $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/ceph.list
sudo apt-get update
sudo apt-get install ceph ceph-mds
# Bootstrap cluster (on admin node)
sudo ceph-deploy new ceph-node1 ceph-node2 ceph-node3
# Configure cluster
sudo tee -a ceph.conf << 'EOF'
# Performance tuning
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 128
osd pool default pgp num = 128
# Network configuration
public network = 192.168.1.0/24
cluster network = 192.168.2.0/24
# MDS configuration
mds cache memory limit = 4294967296
mds cache reservation = 0.1
mds health cache threshold = 1.5
EOF
# Deploy monitors
sudo ceph-deploy mon create-initial
# Deploy OSDs (assuming /dev/sdb on each node)
sudo ceph-deploy osd create --data /dev/sdb ceph-node1
sudo ceph-deploy osd create --data /dev/sdb ceph-node2
sudo ceph-deploy osd create --data /dev/sdb ceph-node3
# Deploy MDS for CephFS
sudo ceph-deploy mds create ceph-node1 ceph-node2 ceph-node3
# Create CephFS
sudo ceph osd pool create cephfs_data 128
sudo ceph osd pool create cephfs_metadata 64
sudo ceph fs new cephfs cephfs_metadata cephfs_data
CephFS Client Configuration
# Install CephFS client
sudo apt-get install ceph-fuse
# Copy admin keyring
sudo scp ceph-admin:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
sudo scp ceph-admin:/etc/ceph/ceph.conf /etc/ceph/
# Create dedicated user for CephFS access
sudo ceph auth get-or-create client.cephfs mon 'allow r' mds 'allow rw' osd 'allow rw pool=cephfs_data'
# Production mount with optimizations
sudo mkdir -p /mnt/cephfs
sudo ceph-fuse /mnt/cephfs \
--name client.cephfs \
--keyring /etc/ceph/ceph.client.cephfs.keyring \
-o cache,rsize=16777216,rasize=16777216,lazyio,fuse_set_user_groups
# fstab configuration
echo "none /mnt/cephfs fuse.ceph ceph.name=cephfs,ceph.keyring=/etc/ceph/ceph.client.cephfs.keyring,_netdev,cache,rsize=16777216,defaults 0 0" | sudo tee -a /etc/fstab
HDFS with High Availability
HDFS NameNode HA Configuration
# Install Hadoop
wget https://archive.apache.org/dist/hadoop/common/hadoop-3.3.4/hadoop-3.3.4.tar.gz
sudo tar -xzf hadoop-3.3.4.tar.gz -C /opt/
sudo mv /opt/hadoop-3.3.4 /opt/hadoop
sudo chown -R hadoop:hadoop /opt/hadoop
# Configure core-site.xml for HA
sudo tee /opt/hadoop/etc/hadoop/core-site.xml << 'EOF'
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/lib/hadoop/tmp</value>
</property>
</configuration>
EOF
# Configure hdfs-site.xml for HA
sudo tee /opt/hadoop/etc/hadoop/hdfs-site.xml << 'EOF'
<configuration>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>namenode1.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>namenode2.example.com:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>namenode1.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>namenode2.example.com:9870</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://journal1.example.com:8485;journal2.example.com:8485;journal3.example.com:8485/mycluster</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>
EOF
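Once the JournalNodes and both NameNodes are up (and automatic failover is configured if desired), the HA state can be checked with the haadmin tool; a quick sketch assuming the nn1/nn2 IDs from the configuration above:
# Show which NameNode is currently active
/opt/hadoop/bin/hdfs haadmin -getServiceState nn1
/opt/hadoop/bin/hdfs haadmin -getServiceState nn2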
HDFS FUSE Mount
# hadoop-fuse-dfs is not included in the Hadoop binary tarball; build it from the
# Hadoop source tree with the native FUSE module enabled (the fuse-dfs component
# under hadoop-hdfs-project), or install a vendor package that ships it, so that
# $HADOOP_HOME/bin/hadoop-fuse-dfs exists before using the script below.
# Create HDFS FUSE mount script
sudo tee /usr/local/bin/mount-hdfs.sh << 'EOF'
#!/bin/bash
HADOOP_HOME=/opt/hadoop
HDFS_MOUNT_POINT=/mnt/hdfs
NAMENODE_URI="hdfs://mycluster"
# Set Hadoop environment
export HADOOP_HOME
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
# Create mount point
mkdir -p $HDFS_MOUNT_POINT
# Mount HDFS
$HADOOP_HOME/bin/hadoop-fuse-dfs $NAMENODE_URI $HDFS_MOUNT_POINT \
-o allow_other,usetrash,rdbuffer_size=131072,rdcache_size=134217728
echo "HDFS mounted at $HDFS_MOUNT_POINT"
EOF
sudo chmod +x /usr/local/bin/mount-hdfs.sh
Performance Analysis & Tuning
Filesystem Performance Benchmarking
IOzone Comprehensive Testing
# Install IOzone
sudo apt-get install iozone3
# Comprehensive filesystem benchmark script
sudo tee /usr/local/bin/fs-benchmark.sh << 'EOF'
#!/bin/bash
MOUNT_POINT="$1"
RESULTS_DIR="/tmp/fs-benchmarks/$(basename $MOUNT_POINT)-$(date +%Y%m%d-%H%M%S)"
if [ -z "$MOUNT_POINT" ]; then
echo "Usage: $0 <mount_point>"
exit 1
fi
mkdir -p "$RESULTS_DIR"
echo "Starting filesystem benchmark for $MOUNT_POINT"
echo "Results will be saved to $RESULTS_DIR"
# IOzone tests
echo "Running IOzone tests..."
cd "$MOUNT_POINT"
# Record size test (4k to 16M)
iozone -a -g 4G -i 0 -i 1 -i 2 -f "$MOUNT_POINT/iozone-test" \
-R -b "$RESULTS_DIR/iozone-excel.xls" > "$RESULTS_DIR/iozone-output.txt"
# Specific workload tests
echo "Running specific workload tests..."
# Database workload simulation (random I/O)
iozone -i 0 -i 2 -s 1G -r 8k -f "$MOUNT_POINT/db-test" > "$RESULTS_DIR/database-workload.txt"
# Web server workload (small files)
iozone -i 0 -i 1 -s 100M -r 4k -f "$MOUNT_POINT/web-test" > "$RESULTS_DIR/webserver-workload.txt"
# Streaming workload (large sequential)
iozone -i 0 -i 1 -s 4G -r 1M -f "$MOUNT_POINT/stream-test" > "$RESULTS_DIR/streaming-workload.txt"
# Cleanup
rm -f "$MOUNT_POINT"/*-test
echo "Benchmark completed. Results in $RESULTS_DIR"
EOF
sudo chmod +x /usr/local/bin/fs-benchmark.sh
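Example invocation against one of the mounts configured earlier (the mount point is a placeholder):
sudo /usr/local/bin/fs-benchmark.sh /mnt/glusterfs-prod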
Network Filesystem Specific Testing
# Network latency and throughput testing
sudo tee /usr/local/bin/network-fs-test.sh << 'EOF'
#!/bin/bash
MOUNT_POINT="$1"
SERVER_IP="$2"
if [ -z "$MOUNT_POINT" ] || [ -z "$SERVER_IP" ]; then
echo "Usage: $0 <mount_point> <server_ip>"
exit 1
fi
echo "Testing network filesystem performance"
echo "Mount point: $MOUNT_POINT"
echo "Server IP: $SERVER_IP"
# Network latency test
echo "Network latency to server:"
ping -c 10 "$SERVER_IP" | tail -n 1
# Bandwidth test
echo "Network bandwidth test:"
iperf3 -c "$SERVER_IP" -t 30
# File operation latency
echo "File operation latency test:"
time ls -la "$MOUNT_POINT" > /dev/null
# Small file performance
echo "Small file performance (1000 x 4KB files):"
cd "$MOUNT_POINT"
time for i in {1..1000}; do
echo "test data" > "small-file-$i.txt"
done
time rm -f small-file-*.txt
# Large file throughput
echo "Large file throughput test:"
time dd if=/dev/zero of="$MOUNT_POINT/large-test-file" bs=1M count=1000 oflag=direct
time dd if="$MOUNT_POINT/large-test-file" of=/dev/null bs=1M iflag=direct
rm -f "$MOUNT_POINT/large-test-file"
echo "Network filesystem test completed"
EOF
sudo chmod +x /usr/local/bin/network-fs-test.sh
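The bandwidth step assumes an iperf3 server is already listening on the storage server (started there with `iperf3 -s`); a usage sketch with placeholder values:
# On the client
sudo apt-get install iperf3
sudo /usr/local/bin/network-fs-test.sh /mnt/nfs_data 192.168.1.100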
System-Level Performance Tuning
Kernel Parameter Optimization
# Create sysctl configuration for network filesystems
sudo tee /etc/sysctl.d/99-network-fs.conf << 'EOF'
# Network buffer sizes
net.core.rmem_default = 262144
net.core.rmem_max = 16777216
net.core.wmem_default = 262144
net.core.wmem_max = 16777216
# TCP settings
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_congestion_control = bbr
net.ipv4.tcp_slow_start_after_idle = 0
# File system settings
fs.file-max = 2097152
fs.nr_open = 1048576
# Virtual memory settings
vm.dirty_ratio = 15
vm.dirty_background_ratio = 5
vm.dirty_expire_centisecs = 12000
vm.dirty_writeback_centisecs = 1500
# NFS specific
sunrpc.tcp_slot_table_entries = 128
sunrpc.udp_slot_table_entries = 128
EOF
# Apply settings
sudo sysctl -p /etc/sysctl.d/99-network-fs.conf
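The bbr congestion control setting only takes effect if the tcp_bbr module is available (kernel 4.9 or newer); a quick verification after applying the sysctls:
# Load bbr and confirm the active congestion control algorithm
sudo modprobe tcp_bbr
sysctl net.ipv4.tcp_available_congestion_control net.ipv4.tcp_congestion_control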
Process and Resource Limits
# Configure limits for network filesystem processes
sudo tee /etc/security/limits.d/network-fs.conf << 'EOF'
# NFS daemon limits
rpc soft nofile 65536
rpc hard nofile 65536
rpc soft nproc 4096
rpc hard nproc 4096
# Gluster daemon limits
gluster soft nofile 65536
gluster hard nofile 65536
gluster soft nproc 4096
gluster hard nproc 4096
# General user limits for accessing network filesystems
* soft nofile 8192
* hard nofile 16384
EOF
Security Hardening
Network Filesystem Security Framework
IPTables Rules for NFS with Stunnel
# Create comprehensive iptables rules
sudo tee /etc/iptables/nfs-security.rules << 'EOF'
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
# Allow loopback
-A INPUT -i lo -j ACCEPT
# Allow established connections
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# SSH access (modify port as needed)
-A INPUT -p tcp --dport 22 -j ACCEPT
# Stunnel encrypted NFS ports
-A INPUT -p tcp --dport 20049 -s 192.168.1.0/24 -j ACCEPT
-A INPUT -p tcp --dport 1111 -s 192.168.1.0/24 -j ACCEPT
-A INPUT -p tcp --dport 10050 -s 192.168.1.0/24 -j ACCEPT
-A INPUT -p tcp --dport 40001 -s 192.168.1.0/24 -j ACCEPT
-A INPUT -p tcp --dport 10051 -s 192.168.1.0/24 -j ACCEPT
# Block direct NFS access
-A INPUT -p tcp --dport 2049 -j DROP
-A INPUT -p udp --dport 2049 -j DROP
-A INPUT -p tcp --dport 111 -j DROP
-A INPUT -p udp --dport 111 -j DROP
# GlusterFS secured ports
-A INPUT -p tcp --dport 24007:24008 -s 192.168.1.0/24 -j ACCEPT
-A INPUT -p tcp --dport 49152:49156 -s 192.168.1.0/24 -j ACCEPT
# Logging
-A INPUT -j LOG --log-prefix "IPTABLES-DROPPED: "
COMMIT
EOF
# Apply rules
sudo iptables-restore < /etc/iptables/nfs-security.rules
sudo iptables-save | sudo tee /etc/iptables/rules.v4 > /dev/null
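To have the rules survive a reboot on Debian/Ubuntu, the iptables-persistent package can load /etc/iptables/rules.v4 automatically at boot; a brief sketch:
# Persist the rule set across reboots
sudo apt-get install iptables-persistent
sudo netfilter-persistent save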
SELinux Configuration for Network Filesystems
# Configure SELinux for NFS
sudo setsebool -P nfs_export_all_rw on
sudo setsebool -P nfs_export_all_ro on
sudo setsebool -P use_nfs_home_dirs on
# Custom SELinux policy for stunnel+NFS
sudo tee /tmp/stunnel-nfs.te << 'EOF'
module stunnel-nfs 1.0;
require {
type stunnel_t;
type nfs_t;
type nfsd_t;
class tcp_socket { accept bind create getattr listen setopt };
class process { setrlimit };
}
# Allow stunnel to bind to NFS ports
allow stunnel_t self:tcp_socket { accept bind create getattr listen setopt };
# Allow stunnel to connect to NFS services
allow stunnel_t nfsd_t:tcp_socket { accept bind create getattr listen setopt };
EOF
# Compile and install policy
sudo checkmodule -M -m -o /tmp/stunnel-nfs.mod /tmp/stunnel-nfs.te
sudo semodule_package -o /tmp/stunnel-nfs.pp -m /tmp/stunnel-nfs.mod
sudo semodule -i /tmp/stunnel-nfs.pp
Certificate Management for Stunnel
Automated Certificate Renewal
# Create certificate renewal script
sudo tee /usr/local/bin/stunnel-cert-renewal.sh << 'EOF'
#!/bin/bash
CERT_DIR="/etc/stunnel/certs"
DAYS_BEFORE_EXPIRY=30
LOG_FILE="/var/log/stunnel-cert-renewal.log"
# Check certificate expiry
check_cert_expiry() {
local cert_file="$1"
local expiry_date=$(openssl x509 -enddate -noout -in "$cert_file" | cut -d= -f2)
local expiry_epoch=$(date -d "$expiry_date" +%s)
local current_epoch=$(date +%s)
local days_until_expiry=$(( (expiry_epoch - current_epoch) / 86400 ))
echo $days_until_expiry
}
# Renew certificate
renew_cert() {
local cert_name="$1"
echo "$(date): Renewing certificate for $cert_name" >> $LOG_FILE
# Generate new certificate
openssl req -new -x509 -days 365 -nodes \
-out "$CERT_DIR/${cert_name}-new.crt" \
-keyout "$CERT_DIR/${cert_name}-new.key" \
-subj "/C=US/ST=State/L=City/O=Organization/OU=IT/CN=${cert_name}.example.com"
# Combine certificate and key
cat "$CERT_DIR/${cert_name}-new.crt" "$CERT_DIR/${cert_name}-new.key" > "$CERT_DIR/${cert_name}-new.pem"
# Backup old certificate
cp "$CERT_DIR/${cert_name}.pem" "$CERT_DIR/${cert_name}.pem.backup.$(date +%Y%m%d)"
# Replace old certificate
mv "$CERT_DIR/${cert_name}-new.pem" "$CERT_DIR/${cert_name}.pem"
rm -f "$CERT_DIR/${cert_name}-new.crt" "$CERT_DIR/${cert_name}-new.key"
# Set permissions
chmod 600 "$CERT_DIR/${cert_name}.pem"
chown stunnel4:stunnel4 "$CERT_DIR/${cert_name}.pem"
# Reload stunnel
systemctl reload stunnel4
echo "$(date): Certificate renewal completed for $cert_name" >> $LOG_FILE
}
# Main logic
for cert in nfs-server nfs-client; do
if [ -f "$CERT_DIR/${cert}.pem" ]; then
days_left=$(check_cert_expiry "$CERT_DIR/${cert}.pem")
if [ $days_left -le $DAYS_BEFORE_EXPIRY ]; then
renew_cert "$cert"
else
echo "$(date): Certificate $cert is valid for $days_left more days" >> $LOG_FILE
fi
fi
done
EOF
sudo chmod +x /usr/local/bin/stunnel-cert-renewal.sh
# Add to root's crontab for a monthly check (append to any existing entries)
( sudo crontab -l 2>/dev/null; echo "0 2 1 * * /usr/local/bin/stunnel-cert-renewal.sh" ) | sudo crontab -
Monitoring & Alerting
Comprehensive Monitoring System
Prometheus Metrics Collection
# Install node_exporter for system metrics
wget https://github.com/prometheus/node_exporter/releases/download/v1.6.1/node_exporter-1.6.1.linux-amd64.tar.gz
tar -xzf node_exporter-1.6.1.linux-amd64.tar.gz
sudo mv node_exporter-1.6.1.linux-amd64/node_exporter /usr/local/bin/
# Create a dedicated system user for the exporter before handing it ownership
sudo useradd --no-create-home --shell /usr/sbin/nologin node_exporter
sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
# Create systemd service
sudo tee /etc/systemd/system/node_exporter.service << 'EOF'
[Unit]
Description=Node Exporter
Wants=network-online.target
After=network-online.target
[Service]
User=node_exporter
Group=node_exporter
Type=simple
ExecStart=/usr/local/bin/node_exporter \
--collector.filesystem.mount-points-exclude="^/(dev|proc|sys|var/lib/docker/.+)($|/)" \
--collector.filesystem.fs-types-exclude="^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$" \
--collector.netdev.device-exclude="^(veth.*|docker.*|br-.*)$"
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable node_exporter
sudo systemctl start node_exporter
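A quick check that the exporter is up and already reporting the mounted filesystems:
# node_filesystem_* metrics should include the network mounts
curl -s http://localhost:9100/metrics | grep '^node_filesystem_avail_bytes' | head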
Custom NFS Metrics Exporter
# Create NFS metrics exporter
sudo tee /usr/local/bin/nfs-metrics-exporter.py << 'EOF'
#!/usr/bin/env python3
import time
import subprocess
import re
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
class NFSMetricsHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path == '/metrics':
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            metrics = self.collect_nfs_metrics()
            self.wfile.write(metrics.encode())
        else:
            self.send_response(404)
            self.end_headers()

    def collect_nfs_metrics(self):
        metrics = []
        # NFS client statistics
        try:
            with open('/proc/net/rpc/nfs') as f:
                content = f.read()
            # Extract RPC statistics
            rpc_match = re.search(r'rpc (\d+) (\d+) (\d+)', content)
            if rpc_match:
                calls, retrans, authrefresh = rpc_match.groups()
                metrics.append(f'nfs_rpc_calls_total {calls}')
                metrics.append(f'nfs_rpc_retransmissions_total {retrans}')
                metrics.append(f'nfs_rpc_auth_refreshes_total {authrefresh}')
            # Extract procedure statistics
            proc_match = re.search(r'proc4 \d+ (.+)', content)
            if proc_match:
                procs = proc_match.group(1).split()
                proc_names = ['null', 'read', 'write', 'commit', 'open', 'open_confirm',
                              'open_noattr', 'open_downgrade', 'close', 'setattr', 'fsinfo',
                              'renew', 'setclientid', 'setclientid_confirm', 'lock', 'lockt',
                              'locku', 'access', 'getattr', 'lookup', 'lookup_root', 'remove',
                              'rename', 'link', 'symlink', 'create', 'pathconf', 'statfs',
                              'readlink', 'readdir', 'server_caps', 'delegreturn', 'getacl',
                              'setacl', 'fs_locations']
                for i, count in enumerate(procs[:len(proc_names)]):
                    metrics.append(f'nfs_operations_total{{operation="{proc_names[i]}"}} {count}')
        except FileNotFoundError:
            pass
        # NFS mount information
        try:
            result = subprocess.run(['mount', '-t', 'nfs,nfs4'],
                                    capture_output=True, text=True)
            mount_count = len(result.stdout.strip().split('\n')) if result.stdout.strip() else 0
            metrics.append(f'nfs_mounts_total {mount_count}')
        except Exception:
            pass
        # Stunnel connection status
        try:
            result = subprocess.run(['pgrep', 'stunnel'],
                                    capture_output=True, text=True)
            stunnel_running = 1 if result.returncode == 0 else 0
            metrics.append(f'stunnel_running {stunnel_running}')
        except Exception:
            pass
        return '\n'.join(metrics) + '\n'

def run_server(port=9101):
    # 9101 avoids a clash with node_exporter, which listens on 9100 by default
    server = HTTPServer(('0.0.0.0', port), NFSMetricsHandler)
    print(f"NFS Metrics Exporter running on port {port}")
    server.serve_forever()

if __name__ == '__main__':
    run_server()
EOF
sudo chmod +x /usr/local/bin/nfs-metrics-exporter.py
# Create systemd service for NFS metrics exporter
sudo tee /etc/systemd/system/nfs-metrics-exporter.service << 'EOF'
[Unit]
Description=NFS Metrics Exporter
After=network.target
[Service]
Type=simple
User=nobody
ExecStart=/usr/local/bin/nfs-metrics-exporter.py
Restart=always
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable nfs-metrics-exporter
sudo systemctl start nfs-metrics-exporter
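Assuming the exporter is left on port 9101 as in the script above, a quick smoke test:
# Should return nfs_rpc_*, nfs_mounts_total and stunnel_running metrics
curl -s http://localhost:9101/metrics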
GlusterFS Monitoring Script
# Advanced GlusterFS monitoring
sudo tee /usr/local/bin/gluster-advanced-monitor.sh << 'EOF'
#!/bin/bash
VOLUME_NAME="gv-prod"
METRICS_FILE="/var/lib/prometheus/node-exporter/gluster.prom"
LOG_FILE="/var/log/gluster-monitoring.log"
# Create metrics directory
mkdir -p "$(dirname $METRICS_FILE)"
# Function to write metrics
write_metric() {
local metric_name="$1"
local metric_value="$2"
local labels="$3"
if [ -n "$labels" ]; then
echo "${metric_name}{${labels}} ${metric_value}" >> "$METRICS_FILE.tmp"
else
echo "${metric_name} ${metric_value}" >> "$METRICS_FILE.tmp"
fi
}
# Initialize temporary metrics file
> "$METRICS_FILE.tmp"
# Check volume status
VOLUME_STATUS=$(gluster volume status $VOLUME_NAME --xml 2>/dev/null)
if [ $? -eq 0 ]; then
write_metric "gluster_volume_status" "1" "volume=\"$VOLUME_NAME\""
# Parse brick status
BRICKS_ONLINE=$(echo "$VOLUME_STATUS" | grep -c "<status>1</status>")
BRICKS_TOTAL=$(echo "$VOLUME_STATUS" | grep -c "<brick>")
write_metric "gluster_bricks_online" "$BRICKS_ONLINE" "volume=\"$VOLUME_NAME\""
write_metric "gluster_bricks_total" "$BRICKS_TOTAL" "volume=\"$VOLUME_NAME\""
else
write_metric "gluster_volume_status" "0" "volume=\"$VOLUME_NAME\""
fi
# Check heal status
HEAL_INFO=$(gluster volume heal $VOLUME_NAME info 2>/dev/null)
if [ $? -eq 0 ]; then
HEAL_PENDING=$(echo "$HEAL_INFO" | grep "Number of entries" | awk '{sum += $4} END {print (sum ? sum : 0)}')
write_metric "gluster_heal_pending_entries" "$HEAL_PENDING" "volume=\"$VOLUME_NAME\""
fi
# Check volume info
VOLUME_INFO=$(gluster volume info $VOLUME_NAME 2>/dev/null)
if [ $? -eq 0 ]; then
REPLICA_COUNT=$(echo "$VOLUME_INFO" | grep "Number of Bricks" | awk '{print $6}' | cut -d'=' -f2)
write_metric "gluster_replica_count" "$REPLICA_COUNT" "volume=\"$VOLUME_NAME\""
fi
# Performance statistics
PERF_INFO=$(timeout 10 gluster volume profile $VOLUME_NAME info 2>/dev/null)
if [ $? -eq 0 ]; then
# Extract read/write statistics (simplified)
TOTAL_READS=$(echo "$PERF_INFO" | grep "READ" | awk '{sum += $2} END {print (sum ? sum : 0)}')
TOTAL_WRITES=$(echo "$PERF_INFO" | grep "WRITE" | awk '{sum += $2} END {print (sum ? sum : 0)}')
write_metric "gluster_total_reads" "$TOTAL_READS" "volume=\"$VOLUME_NAME\""
write_metric "gluster_total_writes" "$TOTAL_WRITES" "volume=\"$VOLUME_NAME\""
fi
# Disk usage per brick
gluster volume status $VOLUME_NAME detail 2>/dev/null | \
grep -A1 "Disk Space" | \
while read line; do
if [[ $line =~ ^([^:]+):.*Available:.*([0-9.]+)(KB|MB|GB|TB) ]]; then
brick="${BASH_REMATCH[1]}"
size="${BASH_REMATCH[2]}"
unit="${BASH_REMATCH[3]}"
# Convert to bytes
case $unit in
"KB") bytes=$(echo "$size * 1024" | bc) ;;
"MB") bytes=$(echo "$size * 1024 * 1024" | bc) ;;
"GB") bytes=$(echo "$size * 1024 * 1024 * 1024" | bc) ;;
"TB") bytes=$(echo "$size * 1024 * 1024 * 1024 * 1024" | bc) ;;
*) bytes="$size" ;;
esac
write_metric "gluster_brick_available_bytes" "$bytes" "volume=\"$VOLUME_NAME\",brick=\"$brick\""
fi
done
# Atomically update metrics file
mv "$METRICS_FILE.tmp" "$METRICS_FILE"
echo "$(date): GlusterFS monitoring completed" >> "$LOG_FILE"
EOF
sudo chmod +x /usr/local/bin/gluster-advanced-monitor.sh
# Add to root's crontab for regular monitoring (append to any existing entries)
( sudo crontab -l 2>/dev/null; echo "*/2 * * * * /usr/local/bin/gluster-advanced-monitor.sh" ) | sudo crontab -
Production Use Cases
Use Case 1: High-Performance Computing (HPC) Environment
Architecture
Login Nodes (2x) → NFS over Stunnel → Storage Head Node → Parallel Filesystem (Lustre/BeeGFS)
↓
Compute Nodes (100x) → Shared scratch space via GlusterFS
Configuration
# HPC-optimized NFS mount for home directories
echo "storage-head:/home /shared/home nfs4 defaults,_netdev,rsize=1048576,wsize=1048576,hard,intr,timeo=600,retrans=2,fsc 0 0" | sudo tee -a /etc/fstab
# High-performance scratch space with GlusterFS
echo "gluster-node1:/scratch /shared/scratch glusterfs defaults,_netdev,backup-volfile-servers=gluster-node2:gluster-node3,log-level=WARNING,attribute-timeout=3600,entry-timeout=3600,performance.cache-size=512MB,performance.read-ahead=on,performance.readdir-ahead=on 0 0" | sudo tee -a /etc/fstab
# Compute node optimizations
sudo tee /etc/sysctl.d/99-hpc-fs.conf << 'EOF'
# Network optimizations for HPC
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
net.ipv4.tcp_rmem = 4096 87380 134217728
net.ipv4.tcp_wmem = 4096 65536 134217728
# Memory and I/O optimizations
vm.dirty_ratio = 3
vm.dirty_background_ratio = 1
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
EOF
Use Case 2: Enterprise Web Farm
Architecture
Load Balancer → Web Servers (4x) → Shared Content (NFS over Stunnel)
↓ ↓
Database Cluster → Shared Database Files (OCFS2)
Configuration
# Web content sharing with NFS over Stunnel
echo "127.0.0.1:/web-content /var/www/shared nfs4 defaults,_netdev,rsize=65536,wsize=65536,hard,intr,timeo=600,fsc,local_lock=all 0 0" | sudo tee -a /etc/fstab
# Session storage with GlusterFS
echo "gluster1:/sessions /var/lib/php/sessions glusterfs defaults,_netdev,backup-volfile-servers=gluster2:gluster3,attribute-timeout=1800,entry-timeout=1800 0 0" | sudo tee -a /etc/fstab
# Database shared storage with OCFS2
echo "/dev/mapper/shared-db /shared/database ocfs2 _netdev,datavolume,nointr 0 0" | sudo tee -a /etc/fstab
Use Case 3: Media Production Pipeline
Architecture
Editing Workstations → High-Speed NFS → Central Storage
↓ ↓ ↓
Render Farm → Distributed Cache → Archive Storage (S3FS)
Configuration
# High-performance media storage
echo "media-storage:/active /mnt/active-media nfs4 defaults,_netdev,rsize=1048576,wsize=1048576,hard,intr,timeo=300,proto=tcp,vers=4.1,fsc 0 0" | sudo tee -a /etc/fstab
# Render cache with GlusterFS
echo "render-node1:/render-cache /shared/render-cache glusterfs defaults,_netdev,backup-volfile-servers=render-node2:render-node3,performance.cache-size=1GB,performance.read-ahead=on,attribute-timeout=7200 0 0" | sudo tee -a /etc/fstab
# Archive storage with S3FS
echo "media-archive-bucket /mnt/archive fuse.s3fs _netdev,passwd_file=/etc/s3fs-media.passwd,url=https://s3.us-west-2.amazonaws.com,use_cache=/tmp/s3fs-cache,multipart_size=128,parallel_count=5 0 0" | sudo tee -a /etc/fstab
Use Case 4: Development Environment with CI/CD
Architecture
Developer Workstations → Git Repository (SSHFS)
↓ ↓
CI/CD Pipeline → Shared Build Cache (GlusterFS)
↓ ↓
Artifact Storage → Container Registry Storage (CephFS)
Configuration
# Shared development space
echo "dev-server:/projects /shared/projects fuse.sshfs defaults,_netdev,IdentityFile=/etc/ssh/dev_rsa,uid=1000,gid=1000,reconnect,compression=yes,cache=yes 0 0" | sudo tee -a /etc/fstab
# CI/CD build cache
echo "ci-gluster1:/build-cache /shared/build-cache glusterfs defaults,_netdev,backup-volfile-servers=ci-gluster2:ci-gluster3,performance.cache-size=256MB,attribute-timeout=600 0 0" | sudo tee -a /etc/fstab
# Container registry storage
echo "ceph-mon1:6789,ceph-mon2:6789:/ /shared/registry fuse.ceph ceph.name=registry,ceph.keyring=/etc/ceph/ceph.client.registry.keyring,_netdev,cache,rsize=4194304 0 0" | sudo tee -a /etc/fstab
Troubleshooting Deep Dive
Network Filesystem Debugging Framework
Comprehensive Diagnostic Script
sudo tee /usr/local/bin/fs-diagnostic.sh << 'EOF'
#!/bin/bash
MOUNT_POINT="$1"
LOG_FILE="/tmp/fs-diagnostic-$(date +%Y%m%d-%H%M%S).log"
if [ -z "$MOUNT_POINT" ]; then
echo "Usage: $0 <mount_point>"
exit 1
fi
exec > >(tee -a "$LOG_FILE")
exec 2>&1
echo "Filesystem Diagnostic Report"
echo "============================"
echo "Date: $(date)"
echo "Mount Point: $MOUNT_POINT"
echo "Generated by: $(whoami)"
echo ""
# Basic mount information
echo "=== Mount Information ==="
mount | grep "$MOUNT_POINT"
echo ""
# Filesystem type detection
FS_TYPE=$(findmnt -n -o FSTYPE "$MOUNT_POINT" 2>/dev/null)
echo "Filesystem Type: $FS_TYPE"
echo ""
# Disk usage and inodes
echo "=== Disk Usage ==="
df -h "$MOUNT_POINT"
df -i "$MOUNT_POINT"
echo ""
# Network connectivity (for network filesystems)
if [[ "$FS_TYPE" =~ ^(nfs|nfs4|cifs|glusterfs|fuse\.sshfs)$ ]]; then
echo "=== Network Connectivity ==="
# Extract server information
SERVER=$(mount | grep "$MOUNT_POINT" | awk '{print $1}' | cut -d: -f1)
if [ -n "$SERVER" ]; then
echo "Testing connectivity to server: $SERVER"
ping -c 3 "$SERVER" 2>/dev/null || echo "Ping failed"
# Port tests based on filesystem type
case "$FS_TYPE" in
"nfs"|"nfs4")
echo "Testing NFS ports..."
nc -zv "$SERVER" 2049 2>/dev/null || echo "NFS port 2049 not accessible"
nc -zv "$SERVER" 111 2>/dev/null || echo "Portmapper port 111 not accessible"
;;
"cifs")
echo "Testing CIFS ports..."
nc -zv "$SERVER" 445 2>/dev/null || echo "CIFS port 445 not accessible"
nc -zv "$SERVER" 139 2>/dev/null || echo "NetBIOS port 139 not accessible"
;;
"glusterfs")
echo "Testing GlusterFS ports..."
nc -zv "$SERVER" 24007 2>/dev/null || echo "GlusterFS management port 24007 not accessible"
;;
esac
fi
echo ""
fi
# Process information
echo "=== Related Processes ==="
case "$FS_TYPE" in
"nfs"|"nfs4")
ps aux | grep -E "(nfs|rpc)" | grep -v grep
;;
"cifs")
ps aux | grep cifs | grep -v grep
;;
"glusterfs")
ps aux | grep gluster | grep -v grep
;;
"fuse.sshfs")
ps aux | grep sshfs | grep -v grep
;;
esac
echo ""
# System logs
echo "=== Recent System Logs ==="
journalctl -n 50 --no-pager | grep -i -E "(nfs|cifs|gluster|sshfs|mount|fuse)" || echo "No relevant log entries found"
echo ""
# Performance test
echo "=== Basic Performance Test ==="
echo "Testing write performance..."
time dd if=/dev/zero of="$MOUNT_POINT/test-write-$$" bs=1M count=10 2>/dev/null
echo "Testing read performance..."
time dd if="$MOUNT_POINT/test-write-$$" of=/dev/null bs=1M 2>/dev/null
rm -f "$MOUNT_POINT/test-write-$$" 2>/dev/null
echo ""
# File operations test
echo "=== File Operations Test ==="
echo "Testing file creation..."
touch "$MOUNT_POINT/test-file-$$" && echo "File creation: OK" || echo "File creation: FAILED"
echo "Testing file deletion..."