#!/dataserfs/libs-2024-01-11/bin/python3

###############################################
##                                           ##
##          Runbook Storage Monitor          ##
##                                           ##
##  For monitoring multiple storage clusters ##
## including Isilon, Pure, Qumulo, NetApp, & ##
## Azure NetApp.                             ##
##                                           ##
##  Authors: DJ Weems, Tony Fontanilla       ##
###############################################

import hashlib, json, os, re, sys, time
import requests
from runbookpy import agent, utils
from requests.packages.urllib3.exceptions import InsecureRequestWarning

if os.getenv("COMPUTE_SITE") == "US_EUS_PD":
    from azure.cli.core import get_default_cli

# Ignore bad HTTPS certs
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# Print a runbook-formatted result message and exit with the matching status code
def final_return(state, message):
    if state:
        print(json.dumps({'message': message, 'status': 'Success'}))
        sys.exit(0)
    else:
        print(json.dumps({'message': message, 'status': 'Failed'}))
        sys.exit(1)
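
# Example output (illustrative values):
#   {"message": "done", "status": "Success"}              -> exit 0
#   {"message": "Invalid hostname.", "status": "Failed"}  -> exit 1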

# Standard interface for all cluster classes (TODO: Netapp; exception: Aznetapp)
# get_used_bytes                              -> returns value
# get_total_bytes                             -> returns value
# get_used_space      (converts to GB for RB) -> returns SCALAR json object
# get_total_space     (converts to GB for RB) -> returns SCALAR json object
# get_used_percentage                         -> returns SCALAR json object
# get_available_space (converts to GB for RB) -> returns SCALAR json object
# get_percentage_up                           -> returns SCALAR json object
# get_quotas                                  -> returns TSV json object
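
# Illustrative usage (hypothetical hostname; requires STORAGE_PASSWORD in the environment):
#
#   storage = Isilon("isilon01.example.com")
#   print(storage.get_used_space())    # emits a runbook SCALAR json object
#   print(storage.get_quotas())        # emits a runbook TSV json object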

# Isilon ##################################################################################################################################
class Isilon:
    def __init__(self, ip):
        self.ip = ip
        
        # Get login from env variable
        self.login = os.getenv("STORAGE_LOGIN", "NORTHAMERICA\\runbook_mntr")

        # Get password from env variable
        self.password = os.getenv("STORAGE_PASSWORD")

        # Verify password is set (os.getenv returns None when the variable is missing)
        if not self.password:
            final_return(False, "Password is not set.")

        self.base_url = f"https://{self.ip}:8080/platform"

    # Call the API
    def call_api(self, endpoint, url):
        # Try to make the get API request, check for connection errors
        try:
            r = requests.get(url, auth=(self.login, self.password), verify=False)
        except requests.exceptions.ConnectionError:
            final_return(False, "Invalid hostname.")

        # Make sure we're authenticated and that we actually have JSON formatted data...
        try:
            if "Authorization required" in r.text:
                final_return(False, "Invalid pasword!")
            return endpoint, json.loads(r.text)
        except json.decoder.JSONDecodeError:
            final_return(False, "API Failure. Not JSON format!")

    # Get used bytes
    def get_used_bytes(self):
        endpoint, api_data = self.call_api('bytes-used', self.base_url+"/1/statistics/current?key=node.disk.ifs.bytes.used.all&devid=all")
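
        # Expected payload shape, as implied by the loop below (abridged):
        #   {"stats": [{"devid": 1, "value": [{"<disk>": <bytes>, ...}, ...]}, ...]}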

        total_disk_size_used = 0

        # Iterate over disks and add up totals
        for devid in api_data['stats']:
            for disk in devid['value']:
                disk_array_used_sum = sum(disk.values())
                total_disk_size_used = total_disk_size_used + disk_array_used_sum 

        return total_disk_size_used

    # Get total bytes
    def get_total_bytes(self):
        endpoint, api_data = self.call_api('bytes-total', self.base_url+"/1/statistics/current?key=node.disk.ifs.bytes.total.all&devid=all")

        total_disk_size = 0

        # Iterate over disks and add up total
        for devid in api_data['stats']:
            for disk in devid['value']:
                disk_array_sum = sum(disk.values())
                total_disk_size = total_disk_size + disk_array_sum 

        return total_disk_size

    def get_used_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_space in decimal GB (bytes / 1000^3, not GiB)
        used_space = self.get_used_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_space", utc_ms, srcname="storage-api", content=used_space, duration_ms=duration_ms)

    def get_total_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate total_space as GB
        total_space = self.get_total_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "total_space", utc_ms, srcname="storage-api", content=total_space, duration_ms=duration_ms)

    def get_used_percentage(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_percentage
        used_percent = self.get_used_bytes()/self.get_total_bytes()*100

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_percentage", utc_ms, srcname="storage-api", content=used_percent, duration_ms=duration_ms)

    def get_available_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate available space as GB
        available_space = (self.get_total_bytes() - self.get_used_bytes())/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "available_space", utc_ms, srcname="storage-api", content=available_space, duration_ms=duration_ms)

    def get_percentage_up(self):
        utc_ms = int(time.time()*1000)

        # Get node status
        endpoint, api_data = self.call_api("node-percentage-up", self.base_url+"/3/cluster/nodes")

        total_up = 0
        total_nodes = api_data['total']

        # Iterate over the nodes and count how many are up
        for npu in api_data['nodes']:
            # If the node is still in the cluster (not smartfailed out), count it as up
            if npu['state']['smartfail']['in_cluster']:
                total_up = total_up + 1

        # Calculate total percentage up
        percent_up = (total_up / total_nodes) * 100

        # Calculate run duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook SCALAR
        return agent.make_scalar("cmd", "percentage_up", utc_ms, srcname="storage-api", content=percent_up, duration_ms=duration_ms)

    def get_quotas(self):
        utc_ms = int(time.time()*1000)

        # Get the list of quotas from API
        endpoint, api_data = self.call_api('quota-list', self.base_url+"/7/quota/quotas")

        quotas = []

        # Iterate over the data and generate the list for TSV
        for q in api_data['quotas']:
            # Get path, usage, limit
            path = q['path']
            usage = q['usage']['fslogical']
            limit = q['thresholds']['hard']

            # Split path for analysis 
            path_data = q['path'].split('/')[1:]

            # Preliminary search for users
            if path_data[1] in ['xbox', 'vxbox', 'tvis', 'verification']:
                q_type = 'user'
            else:
                q_type = 'group'

            # If we can easily assume it's a user, set the owner var
            if q_type == 'user' and len(path_data) == 3:
                owner = path_data[2]
            else:
                # If it's not easy to assume we need to dig deeper
                endpoint, user_lookup = self.call_api('path-data', self.base_url+f"/1/auth/access/root?path={q['path']}")

                # if the owner isn't root, set the type to user and owner to the name. If the name value is missing use UID
                if 'name' in user_lookup['access'][0]['file']['owner'] and user_lookup['access'][0]['file']['owner']['name'] != "root":
                    q_type = 'user'
                    owner = user_lookup['access'][0]['file']['owner']['name']

                elif 'id' in user_lookup['access'][0]['file']['owner'] and user_lookup['access'][0]['file']['owner']['id'] != "UID:0":
                    q_type = 'user'
                    owner = user_lookup['access'][0]['file']['owner']['id'].split(":")[1]

                # If we didn't find a user, set the type to group, and owner to the group name
                elif 'name' in user_lookup['access'][0]['file']['group'] and user_lookup['access'][0]['file']['group']['name']:
                    q_type = 'group'
                    owner = user_lookup['access'][0]['file']['group']['name']

                elif 'id' in user_lookup['access'][0]['file']['group'] and user_lookup['access'][0]['file']['group']['id']:
                    q_type = 'group'
                    owner = user_lookup['access'][0]['file']['group']['id'].split(":")[1]

                # Fall back so owner is always bound even when the lookup returns no usable data
                else:
                    owner = "unknown"

            # If there is a \ in the owner name, split it and get rid of the AD domain
            if "\\" in owner:
                owner = owner.split("\\")[1]

            # Generate unique hash for key 
            id = hashlib.md5(f"{owner}isilon{path}".encode())

            # Append data to list
            quotas.append([id.hexdigest(),owner,path,usage,limit,q_type])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return the runbook TSV
        return agent.make_tsv("cmd", "storage-quotas", utc_ms, srcname="storage-api", content=utils.list_to_tsv(quotas), \
                duration_ms=duration_ms, parserok="on", method="storage-monitor", \
                colnames="id,name,path,usage,quota,type")

    def get_storagepools(self):
        utc_ms = int(time.time()*1000)

        # Get list of storagepools
        endpoint, api_data = self.call_api('storagepools-list', self.base_url+"/9/storagepool/storagepools")

        storage_pools = []

        # Iterate over the data and generate the list for TSV
        for sp in api_data['storagepools']:
            storage_pools.append([sp['name'], sp['id'], sp['protection_policy'], sp['type'], sp['can_disable_l3'], \
                            sp['can_enable_l3'], sp['l3'], sp['l3_status'], sp['usage']['avail_bytes'], \
                            sp['usage']['avail_hdd_bytes'], sp['usage']['avail_ssd_bytes'], sp['usage']['balanced'], \
                            sp['usage']['free_bytes'], sp['usage']['free_hdd_bytes'], sp['usage']['free_ssd_bytes'], \
                            sp['usage']['pct_used'], sp['usage']['pct_used_hdd'], sp['usage']['pct_used_ssd'], \
                            sp['usage']['total_bytes'], sp['usage']['total_hdd_bytes'], sp['usage']['total_ssd_bytes'], \
                            sp['usage']['usable_bytes'], sp['usage']['usable_hdd_bytes'], sp['usage']['usable_ssd_bytes'], \
                            sp['usage']['used_bytes'], sp['usage']['used_hdd_bytes'], sp['usage']['used_ssd_bytes'], \
                            sp['usage']['virtual_hot_spare_bytes']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-storagepools", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(storage_pools), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="name,id,protection_policy,type,can_disable_l3,can_enable_l3,l3,l3_status,avail_bytes,avail_hdd_bytes,avail_ssd_bytes,balanced,free_bytes,free_hdd_bytes,free_ssd_bytes,pct_used,pct_used_hdd,pct_used_ssd,total_bytes,total_hdd_bytes,total_ssd_bytes,usable_bytes,usable_hdd_bytes,usable_ssd_bytes,used_bytes,used_hdd_bytes,used_ssd_bytes,virtual_hot_spare_bytes")

    def get_network_interfaces(self):
        utc_ms = int(time.time()*1000)

        # Get list of network interfaces
        endpoint, api_data = self.call_api("networkinterfaces-list", self.base_url+"/7/network/interfaces")

        # COLNAMES:
        # lnn,id,name,ip_addr,nic_name,status,type,owner
        interfaces = []

        # Iterate over the data and generate the list for TSV
        for ni in api_data['interfaces']:
            # Check if the owners are present
            if len(ni['owners']) > 0:
                # Check if the IPs are present
                if len(ni['ip_addrs']) > 0:
                    interfaces.append([ni['lnn'], ni['id'], ni['name'], ni['ip_addrs'][0], ni['nic_name'], ni['status'], ni['type'], \
                            f"{ni['owners'][0]['groupnet']}.{ni['owners'][0]['subnet']}.{ni['owners'][0]['pool']}"])
                else:
                    interfaces.append([ni['lnn'], ni['id'], ni['name'], "N/A", ni['nic_name'], ni['status'], ni['type'], \
                            f"{ni['owners'][0]['groupnet']}.{ni['owners'][0]['subnet']}.{ni['owners'][0]['pool']}"])
            else:
                # Check if the IPs are present
                if len(ni['ip_addrs']) > 0:
                    interfaces.append([ni['lnn'], ni['id'], ni['name'], ni['ip_addrs'][0], ni['nic_name'], ni['status'], ni['type'], "N/A"])
                else:
                    interfaces.append([ni['lnn'], ni['id'], ni['name'], "N/A", ni['nic_name'], ni['status'], ni['type'], "N/A"])

        # Sort list by lnn
        interfaces = sorted(interfaces, key=lambda x: x[0])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-network-interfaces", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(interfaces), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="lnn,id,name,ip_addr,nic_name,status,type,owner")

    def get_xfer_statistics(self):
        utc_ms = int(time.time()*1000)

        # Get transfer stats
        endpoint, api_data = self.call_api("xfer-stats", self.base_url+"/1/statistics/current?key=node.disk.xfers.rate.sum&nodes=all")
        
        # COLNAMES
        # node, time, value
        node_stats = []

        # Iterate over the data and generate the list for TSV
        for xs in api_data['stats']:
            node_stats.append([xs['devid'], xs['time'], xs['value']])

        # Compute the cluster-wide average transfer rate
        average = sum(val[2] for val in node_stats) / len(node_stats)

        # Append it to the list for TSV, stamped with the current time in epoch
        # seconds to match the per-node stat timestamps
        node_stats.append(['average', int(time.time()), average])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-xfer-stats", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(node_stats), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="node,time,value")

    def get_nfs_client_stats(self):
        utc_ms = int(time.time()*1000)

        # Get NFS client stats
        endpoint, api_data = self.call_api("nfs-client-stat", self.base_url+"/1/statistics/current?key=node.clientstats.connected.nfs&nodes=all")
        
        # COLNAMES
        # node, time, value
        nfs_client_count = []

        # Iterate over the data and generate the list for TSV
        for ncs in api_data['stats']:
            nfs_client_count.append([ncs['devid'], ncs['time'], ncs['value']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-nfs-client-stats", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(nfs_client_count), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="node,time,value")

    def get_cifs_client_stats(self):
        utc_ms = int(time.time()*1000)

        # Get CIFS client stats
        endpoint, api_data = self.call_api("cifs-client-stat", self.base_url+"/1/statistics/current?key=node.clientstats.connected.cifs&nodes=all")
        
        # COLNAMES
        # node, time, value
        cifs_client_count = []

        # Iterate over the data and generate the list for TSV
        for ccs in api_data['stats']:
            cifs_client_count.append([ccs['devid'], ccs['time'], ccs['value']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-cifs-client-stats", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(cifs_client_count), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="node,time,value")

    def get_client_statistics(self):
        utc_ms = int(time.time()*1000)

        # Get client stats 
        endpoint, api_data = self.call_api("client-stats", self.base_url+"/3/statistics/summary/client?totalby=remote_name")
        
        # COLNAMES 
        # remote_name,time,ops,time_avg,in,out,protocol,local_name,class
        client_stats = []

        # Iterate over the data and generate the list for TSV
        for cs in api_data['client']:
            client_stats.append([cs['remote_name'],cs['time'],cs['num_operations'],cs['time_avg'],cs['in'],cs['out'],cs['protocol'],cs['local_name'],cs['class']])

        duration_ms = int(time.time()*1000) - utc_ms
        
        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-client-stats", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(client_stats), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="remote_name,time,ops,time_avg,in,out,protocol,local_name,class")

    def get_ops_in(self):
        # Get time for duration calculation
        utc_ms = int(time.time()*1000)

        # get the api data
        endpoint, api_data = self.call_api("iops-in", self.base_url+"/1/statistics/current?key=node.ifs.ops.in.rate&nodes=all")

        # Setup list for TSV
        ops_in_list = []

        # Parse the data
        for node in api_data['stats']:
            devid      = node['devid']
            error      = node['error']
            error_code = node['error_code']
            key        = node['key']
            ntime      = node['time']
            value      = node['value']

            ops_in_list.append([devid,error,error_code,key,ntime,value])

        # Calculate duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-ops-in", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(ops_in_list), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="id,error,errorCode,key,time,value")

    def get_ops_out(self):
        # Get time for duration calculation
        utc_ms = int(time.time()*1000)

        # get the api data
        endpoint, api_data = self.call_api("iops-out", self.base_url+"/1/statistics/current?key=node.ifs.ops.out.rate&nodes=all")

        # Setup list for TSV
        ops_out_list = []

        # Parse the data
        for node in api_data['stats']:
            devid      = node['devid']
            error      = node['error']
            error_code = node['error_code']
            key        = node['key']
            ntime      = node['time']
            value      = node['value']

            ops_out_list.append([devid,error,error_code,key,ntime,value])

        # Calculate duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-ops-out", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(ops_out_list), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="id,error,errorCode,key,time,value")

    def get_client_statistics_protocol(self):
        utc_ms = int(time.time()*1000)

        # Get client stats by protocol
        endpoint, api_data = self.call_api("client-stats-protocol", self.base_url+"/3/statistics/summary/client?totalby=node,protocol,remote_name")
        
        client_stats_protocol = []

        # Iterate over the data and generate the list for TSV
        for csp in api_data['client']:
            client_stats_protocol.append([csp['node'], csp['num_operations'], csp['in'], csp['out'], csp['time'], \
                                    csp['time_avg'], csp['protocol'], csp['class'], csp['local_name'], csp['remote_name']])

        # Sort the list by node ID
        client_stats_protocol = sorted(client_stats_protocol, key=lambda x: x[0])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "isilon-client-stats-protocol", utc_ms, srcname="isilon-api", content=utils.list_to_tsv(client_stats_protocol), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="node,ops,in,out,time,time_avg,protocol,class,local_name,remote_name")
# END Isilon ##############################################################################################################################

# Pure ####################################################################################################################################
class Pure:
    def __init__(self, ip):
        self.ip = ip
        self.base_url = f"https://{ip}"
        self.api_url = f"https://{ip}/api/1.8"

        # Get password from env variable
        self.api_token = os.getenv("STORAGE_PASSWORD")

        # Verify the API token is set (os.getenv returns None when the variable is missing)
        if not self.api_token:
            final_return(False, "API token is not set.")

        # Start API session
        self.login()

    # Login to API and start session
    def login(self):
        # Try to login, check for connection errors
        try:
            r = requests.post(self.base_url+"/api/login", verify=False, headers={'api-token': self.api_token})
        except requests.exceptions.ConnectionError:
            final_return(False, "Invalid hostname.")

        # Make sure we're authenticated
        if "Authentication Failed" in r.text:
            final_return(False, "Invalid API key.")

        # If everything passes, grab the x_auth_token
        self.x_auth_token = r.headers['x-auth-token']

    # Make general calls to the API based off the route/url
    def call_api(self, url):
        # Make API call with the x_auth_token retrieved by login function
        try:
            r = requests.get(url, headers={'x-auth-token': self.x_auth_token}, verify=False)
        except requests.exceptions.ConnectionError:
            final_return(False, "Invalid hostname.")

        # Make sure we have access
        try:
            if "Access Denied" in r.text:
                final_return(False, "Invalid x-auth-token.")
            return json.loads(r.text) 
        # Check for json formatted data
        except json.decoder.JSONDecodeError:
            final_return(False, "API Failure. Not JSON format!")

    def get_used_bytes(self):
        api_data = self.call_api(self.api_url+"/file-systems")

        # Physical space consumed. Other fields available under ['total']:
        # 'provisioned', plus 'virtual', 'snapshots', 'unique' under ['space']
        return api_data['total']['space']['total_physical']

    def get_total_bytes(self):
        api_data = self.call_api(self.api_url+"/file-systems")

        # Approximate the logical capacity as physical usage scaled by the
        # reported data-reduction ratio
        total_physical_size = api_data['total']['space']['total_physical']
        data_reduction      = api_data['total']['space']['data_reduction']

        return total_physical_size * data_reduction

    def get_used_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_space as GB
        used_space = self.get_used_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_space", utc_ms, srcname="storage-api", content=used_space, duration_ms=duration_ms)

    def get_total_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate total_space as GB
        total_space = self.get_total_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "total_space", utc_ms, srcname="storage-api", content=total_space, duration_ms=duration_ms)

    def get_used_percentage(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_percentage
        used_percent = self.get_used_bytes()/self.get_total_bytes()*100

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_percentage", utc_ms, srcname="storage-api", content=used_percent, duration_ms=duration_ms)

    def get_available_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate available space as GB
        available_space = (self.get_total_bytes() - self.get_used_bytes())/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "available_space", utc_ms, srcname="storage-api", content=available_space, duration_ms=duration_ms)

    def get_percentage_up(self):
        # Get current time
        utc_ms = int(time.time()*1000)
        
        # Get node status
        api_data = self.call_api(self.api_url+"/blades") 

        total_nodes = api_data['pagination_info']['total_item_count']
        
        total_up = 0

        # Iterate over API data
        for item in api_data['items']:

            # Check status for healthy system, if healthy increment total_up for percentage calc later
            if item['status'] == "healthy" or item['status'] == "unused":
                total_up = total_up + 1

        # Calculate percentage up
        percent_up = (total_up / total_nodes) * 100

        # calculate the run duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook SCALAR
        return agent.make_scalar("cmd", "percentage_up", utc_ms, srcname="storage-api", content=percent_up, duration_ms=duration_ms)

    def get_quotas(self):
        # Get time
        utc_ms = int(time.time()*1000)

        # Get list of file systems
        api_data = self.call_api(self.api_url+"/file-systems")

        # Prepare quotas list
        quotas = []

        # Get quota data
        for item in api_data['items']:
            name = item['name']
            # Handle user quotas
            user_quotas = self.call_api(self.api_url+f"/usage/users?file_system_names={name}")
            for quota in user_quotas['items']:
                # Set username, if username is None, set to ID
                username = quota['user']['name']
                if username is None:
                    username = quota['user']['id']

                path     = quota['file_system']['name']
                usage    = quota['usage']
                limit    = quota['quota']
                q_type   = 'user'
                id = hashlib.md5(f"{username}pure{path}".encode())
                quotas.append([id.hexdigest(),username,path,usage,limit,q_type])
            
            # Handle group quotas
            group_quotas = self.call_api(self.api_url+f"/usage/groups?file_system_names={name}")
            for quota in group_quotas['items']:
                # Set groupname; if groupname is None, set to ID
                groupname = quota['group']['name']
                if groupname is None:
                    groupname = quota['group']['id']

                path     = quota['file_system']['name']
                usage    = quota['usage']
                limit    = quota['quota']
                q_type   = 'group'
                id = hashlib.md5(f"{groupname}pure{path}".encode())
                quotas.append([id.hexdigest(),groupname,path,usage,limit,q_type])

        # Calculate duration
        duration_ms = int(time.time()*1000) - utc_ms
                
        # Return the runbook TSV
        return agent.make_tsv("cmd", "storage-quotas", utc_ms, srcname="storage-api", content=utils.list_to_tsv(quotas), \
                duration_ms=duration_ms, parserok="on", method="storage-monitor", \
                colnames="id,name,path,usage,quota,type")
# END Pure ################################################################################################################################

# Qumulo ##################################################################################################################################
class Qumulo:
    def __init__(self, ip):
        self.ip = ip
        self.base_url = f"https://{ip}:8000"
        self.login_url = f"{self.base_url}/v1/session/login"

        # Get password from env variable
        self.password = os.getenv("STORAGE_PASSWORD")

        # Verify password is set (os.getenv returns None when the variable is missing)
        if not self.password:
            final_return(False, "Password is not set.")

        self.login()

    # Start API session
    def login(self):
        # Try to login, check for connection errors
        try:
            # Get login from env variable
            login = os.getenv("STORAGE_LOGIN", "NORTHAMERICA\\runbook_mntr")
            r = requests.post(self.login_url, verify=False, json={"username": login, "password": self.password}, headers={'Content-Type': 'application/json'})
        except requests.exceptions.ConnectionError:
            final_return(False, "Invalid hostname.")

        # Make sure we're authenticated 
        if "Incorrect username or password" in r.text:
            final_return(False, "Incorrect username or password.")

        # If everything is fine, set the bearer_token for calling the API
        try:
            self.bearer_token = json.loads(r.text)['bearer_token']

        # Check for json formatted data
        except json.decoder.JSONDecodeError:
            final_return(False, "API Failure. Not JSON format!")

    # Make general API calls based on route
    def call_api(self, url):
        # Call the API endpoint, check for connection errors
        try:
            r = requests.get(url, verify=False, headers={'Authorization': "Bearer "+self.bearer_token})
        except requests.exceptions.ConnectionError:
            final_return(False, "Invalid hostname... DNS? Network?")

        # check for json formatted data
        try:
            return json.loads(r.text)
        except json.decoder.JSONDecodeError:
            final_return(False, "API Failure. Not JSON format!")

    # API Call for getting used bytes
    def get_used_bytes(self):
        # Get the filesystem data usage in bytes from API
        api_data = self.call_api(self.base_url+"/v1/file-system")

        # Return data
        return int(api_data['total_size_bytes']) - int(api_data['free_size_bytes'])

    # API Call for getting total bytes
    def get_total_bytes(self):
        # Get the filesystem total size in bytes from API
        api_data = self.call_api(self.base_url+"/v1/file-system")

        # Return data
        return int(api_data['total_size_bytes'])

    def get_used_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_space as GB
        used_space = self.get_used_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_space", utc_ms, srcname="storage-api", content=used_space, duration_ms=duration_ms)

    def get_total_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate total_space as GB
        total_space = self.get_total_bytes()/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "total_space", utc_ms, srcname="storage-api", content=total_space, duration_ms=duration_ms)

    def get_used_percentage(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate used_percentage
        used_percent = self.get_used_bytes()/self.get_total_bytes()*100

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "used_percentage", utc_ms, srcname="storage-api", content=used_percent, duration_ms=duration_ms)

    def get_available_space(self):
        # Get current time in utc milliseconds
        utc_ms = int(time.time()*1000)

        # Calculate available space as GB
        available_space = (self.get_total_bytes() - self.get_used_bytes())/1000/1000/1000

        # Calculate duration in milliseconds
        duration_ms = int(time.time()*1000) - utc_ms

        # Return SCALAR
        return agent.make_scalar("cmd", "available_space", utc_ms, srcname="storage-api", content=available_space, duration_ms=duration_ms)

    def get_network_interfaces(self):
        utc_ms = int(time.time()*1000)

        # Get the list of network interfaces
        api_data = self.call_api(self.base_url+"/v2/network/interfaces/")

        interfaces = []

        # Iterate over the data and generate the list for TSV
        for ni in api_data:
            interfaces.append([ni['id'], ni['name'], ni['default_gateway'], ni['default_gateway_ipv6'], ni['bonding_mode'], ni['mtu']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "qumulo-network-interfaces", utc_ms, srcname="qumulo-api", content=utils.list_to_tsv(interfaces), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="id,name,gateway,gateway_ipv6,bonding_mode,mtu")

    def get_client_statistics_protocol(self):
        utc_ms = int(time.time()*1000)

        # Get client stats
        api_data = self.call_api(self.base_url+"/v2/network/connections/")

        # COLNAMES
        # id,type,address
        client_stat_protocol = []

        # Iterate over the data and generate the list for TSV
        for node in api_data:
            for connection in node['connections']:
                client_stat_protocol.append([node['id'], connection['type'], connection['network_address']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "qumulo-client-stats-protocol", utc_ms, srcname="qumulo-api", content=utils.list_to_tsv(client_stat_protocol), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="id,type,address")

    def get_snapshots(self):
        utc_ms = int(time.time()*1000)

        # Get list of snapshots
        api_data = self.call_api(self.base_url+"/v2/snapshots/status/")

        snapshots = []

        # Iterate over the data and generate the list for TSV
        for snapshot in api_data['entries']:
            snapshots.append([snapshot['id'], snapshot['name'], snapshot['timestamp'], snapshot['directory_name'], snapshot['source_file_id'], \
                    snapshot['source_file_path'], snapshot['created_by_policy'], snapshot['expiration']])

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook TSV
        return agent.make_tsv("cmd", "qumulo-snapshots", utc_ms, srcname="qumulo-api", content=utils.list_to_tsv(snapshots), \
        duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="id,name,timestamp,directory_name,source_file_id,source_file_path,created_by_policy,expiration")

    def get_percentage_up(self):
        utc_ms = int(time.time()*1000)

        # Get node status
        api_data = self.call_api(self.base_url+"/v1/cluster/nodes/")

        total_nodes = 0
        total_up = 0

        # Iterate over the nodes, counting the total and how many are online
        for node in api_data:
            if node['node_status'] == "online":
                total_up = total_up + 1

            total_nodes = total_nodes + 1

        # Calculate percentage up
        percent_up = (total_up / total_nodes) * 100

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook SCALAR
        return agent.make_scalar("cmd", "percentage_up", utc_ms, srcname="storage-api", content=percent_up, duration_ms=duration_ms)

    def get_total_nodes(self):
        utc_ms = int(time.time()*1000)

        # Get node status
        api_data = self.call_api(self.base_url+"/v1/cluster/nodes/")

        # Count the nodes reported by the API
        total_nodes = len(api_data)

        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook SCALAR
        return agent.make_scalar("cmd", "total_nodes", utc_ms, srcname="storage-api", content=total_nodes, duration_ms=duration_ms)

    def get_quotas(self):
        utc_ms = int(time.time()*1000)

        # Get the quotas from API
        api_data = self.call_api(self.base_url+"/v1/files/quotas/status/?limit=9999;")

        quotas = []

        for quota in api_data['quotas']:
            # Generate ID
            id = hashlib.md5(f"{quota['id']}qumulo{quota['path']}".encode())

            # Calculate percent used, e.g. limit=2000, usage=500 -> 25.0
            space_used_percent = 0
            limit = int(quota['limit'])
            usage = int(quota['capacity_usage'])
            if limit > 0 and usage > 0:
                space_used_percent = usage / limit * 100

            # Append data to quotas list (all Qumulo quotas are directory quotas)
            quotas.append([id.hexdigest(),None,quota['path'],quota['capacity_usage'],quota['limit'],'directory',space_used_percent])

        # Calculate duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return the runbook TSV
        return agent.make_tsv("cmd", "qumulo-storage-quotas", utc_ms, srcname="storage-api", content=utils.list_to_tsv(quotas), \
                duration_ms=duration_ms, parserok="on", method="storage-monitor", \
                colnames="id,name,path,usage,quota,type,space_used_percent")


    def get_cluster_slots(self):
        utc_ms = int(time.time()*1000)

        # Get the quotas from API
        api_data = self.call_api(self.base_url+"/v1/cluster/slots/")

        slots = []

        # Example slot record (abridged):
        #   {"id": "1.1", "node_id": 1, "slot": 1, "state": "healthy", "slot_type": "SSD",
        #    "disk_type": "SSD", "disk_model": "MZXL57T6HALA-000H3",
        #    "disk_serial_number": "S5Z1NE0R601275", "capacity": "7681231290368",
        #    "drive_bay": "1.2", "led_pattern": "LED_PATTERN_NORMAL"}
        for slot in api_data:
            # Append data to slots list
            slots.append([slot['id'], slot['node_id'], slot['slot'], slot['state'], slot['slot_type'], slot['disk_type'], \
                    slot['disk_model'], slot['disk_serial_number'], slot['capacity'], slot['drive_bay'], slot['led_pattern']])

        # Calculate duration
        duration_ms = int(time.time()*1000) - utc_ms

        # Return the runbook TSV
        return agent.make_tsv("cmd", "qumulo-cluster-slots", utc_ms, srcname="storage-api", content=utils.list_to_tsv(slots), \
                duration_ms=duration_ms, parserok="on", method="storage-monitor", \
                colnames="id,node_id,slot,state,slot_type,disk_type,disk_model,disk_serial_number,capacity,drive_bay,led_pattern")

###########################################################################################################################################

# Azure Netapp ############################################################################################################################
class Aznetapp:
    def __init__(self, sid):
        self.sid = sid

    # Support function provided by Tony for communicating with Azure CLI
    def az_cli(self, args_str):
        # Split the command args into list
        args = args_str.split()

        # Initialize default CLI
        cli = get_default_cli()

        # Call CLI with args
        cli.invoke(args)

        # Check for results from CLI call, if true, return the results
        if cli.result.result:
            return cli.result.result

        # Check for errors from CLI call, if true, raise the error
        elif cli.result.error:
            raise cli.result.error

        return True
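
    # Illustrative call (subscription ID hypothetical), mirroring the queries below:
    #   ids = self.az_cli(f"resource list --subscription {self.sid} "
    #                     "--resource-type Microsoft.NetApp/netAppAccounts --query [].id -o tsv")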

    def gen_disk_usage_tsv(self):
        utc_ms = int(time.time()*1000)

        disks = []

        # Get accounts
        accounts = self.az_cli(f"resource list --subscription {self.sid} --resource-type Microsoft.NetApp/netAppAccounts --query [].id -o tsv --output none")

        # Iterate over accounts and gather name, resource group, and pools
        for a in accounts:
            name = self.az_cli(f"resource show --ids {a} --query name --output none")
            rg = self.az_cli(f"resource show --ids {a} --query resourceGroup --output none")
            pools = self.az_cli(f"netappfiles pool list --subscription {self.sid} --resource-group {rg} --account-name {name} --query [].id -o tsv --output none")

            # Iterate over pools and get the poolname and volumes
            for p in pools:
                poolname = re.sub(r".*/", "", self.az_cli(f"resource show --ids {p} --query name --output none"))
                volumes = self.az_cli(f"netappfiles volume list --subscription {self.sid} --resource-group {rg} --account-name {name} --pool-name {poolname} --query [].id -o tsv --output none")

                # Check if the volumes is a list, if so iterate over it
                if isinstance(volumes, list):

                    # Iterate over volumes and grab disk information
                    for v in volumes:
                        result = self.az_cli(f"monitor metrics list --resource {v} --interval PT60M --metric VolumeLogicalSize,VolumeAllocatedSize,VolumeSnapshotSize --query value[].timeseries[].data[-1].average -o tsv --output none")

                        # Make sure the result is a list
                        if isinstance(result, list):
                            diskused = result[0]
                            disksize = result[1]
                            snapshotsize = result[2]

                            # Append disk data to list for TSV
                            disks.append([v.split("/")[-1], diskused, disksize, (diskused / disksize), snapshotsize])
        
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook ingestible TSV in json format
        return agent.make_tsv("cmd", "aznetapp-disk-usage", utc_ms, srcname="az-cli", \
                    content=utils.list_to_tsv(disks), duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="volume,diskused,allocated,percent,snapshotsize")

    def check_netapp_pools(self):
        utc_ms = int(time.time()*1000)
        capacity_pools = []
        
        # Get accounts
        accounts = self.az_cli(f"resource list --subscription {self.sid} --resource-type Microsoft.NetApp/netAppAccounts --query [].id -o tsv --output none")

        # Iterate over accounts and gather name, resource group, and pools
        for a in accounts:
            name = self.az_cli(f"resource show --ids {a} --query name --output none")
            rg = self.az_cli(f"resource show --ids {a} --query resourceGroup --output none")
            pools = self.az_cli(f"netappfiles pool list --subscription {self.sid} --resource-group {rg} --account-name {name} --query [].id -o tsv --output none")

            # Iterate over pools and get the poolname and volumes
            for p in pools:
                poolname = re.sub(r".*/", "", self.az_cli(f"resource show --ids {p} --query name --output none"))
                result = self.az_cli("monitor metrics list --resource " + p + " --interval PT60M --metric VolumePoolAllocatedUsed,VolumePoolAllocatedSize --query value[].timeseries[].data[-1].average -o tsv --output none")
                
                # Make sure the result is a list
                if isinstance(result, list):
                    diskused = result[0]
                    disksize = result[1]

                    # Append capacity data to list for TSV
                    capacity_pools.append([p.split("/")[-1], diskused, disksize, (diskused / disksize)])
        
        duration_ms = int(time.time()*1000) - utc_ms

        # Return runbook ingestible TSV in json format
        return agent.make_tsv("cmd", "aznetapp-capacity-pool", utc_ms, srcname="az-cli", \
                    content=utils.list_to_tsv(capacity_pools), duration_ms=duration_ms, parserok="on", method="storage-monitor", colnames="pool,diskused,allocated,percent")
###########################################################################################################################################

# Netapp ##################################################################################################################################
class Netapp:
    def __init__(self):
        pass
###########################################################################################################################################


# MAIN()
if __name__ == "__main__":
    # List of supported cluster types for validation
    ctypes = ['isilon', 'pure', 'qumulo', 'netapp', 'aznetapp']

    # Check for help first
    if len(sys.argv) == 2:
        if sys.argv[1] == "-h" or sys.argv[1] == "--help":
            print(f"Usage: ./{sys.argv[0]} <hostname> <cluster type> <command>")
            ex_types = '\n\t'.join(ctypes)
            print(f"\nCluster Types: \n\t{ex_types}")
            print(f"\nAvailable Commands: \n\tdisk usage")
            sys.exit(0)

    # Otherwise, check for all args
    if len(sys.argv) < 5:
        final_return(False, f"Missing CLI Arguments. Usage: {sys.argv[0]} <hostname> <cluster type> <subid|false> <command>")

    # Define system
    ip           = sys.argv[1]
    cluster_type = sys.argv[2]
    subid        = sys.argv[3]

    # Check cluster types
    if cluster_type not in ctypes:
        final_return(False, f"cluster type must be of: {ctypes}")

    # Parse the command (it may arrive as one argument or as several space-separated ones)
    if len(sys.argv) > 5:
        command = " ".join(sys.argv[4:])
    else:
        command = sys.argv[4]
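
    # e.g. ./storage_monitor.py isilon01.example.com isilon false get used space
    #      -> command == "get used space"   (script name and hostname illustrative)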

    # Check cluster types and initialize their respective classes 
    # Start isilon #################################################
    if cluster_type == "isilon":
        # initialize isilon class
        storage = Isilon(ip)

        if command == "get storagepools":
            # Get storage pools
            print(storage.get_storagepools())

        elif command == "get xfer statistics":
            # Get transfer stats
            print(storage.get_xfer_statistics())

        elif command == "get nfs client stats":
            # Get nfs client stats
            print(storage.get_nfs_client_stats())

        elif command == "get cifs client stats":
            # Get cifs client stats
            print(storage.get_cifs_client_stats())

        elif command == "get client statistics":
            # Get client stats 
            print(storage.get_client_statistics())

        elif command == "get network interfaces":
            print("get network interfaces")
            # Get network interfaces
            print(storage.get_network_interfaces())

        elif command == "get ops in":
            print(storage.get_ops_in())

        elif command == "get ops out":
            print(storage.get_ops_out())

        elif command == "get client statistics protocol":
            # Get client stats by protocol
            print(storage.get_client_statistics_protocol())
    # End Isilon ###############################################
    # Start Pure ###############################################
    if cluster_type == "pure":
        # Initialize pure class
        storage = Pure(ip)
    # End Pure #####################################################
    # Start Qumulo #################################################
    if cluster_type == "qumulo":
        # Initialize qumulo class
        storage = Qumulo(ip)

        if command == "get network interfaces":
            print("get network interfaces")
            # Get network interfaces
            print(storage.get_network_interfaces())

        elif command == "get client statistics protocol":
            # Get client stats by protocol
            print(storage.get_client_statistics_protocol())

        elif command == "get snapshots":
            # Get qumulo snapshots
            print(storage.get_snapshots())

        elif command == "get cluster slots":
           # get Qumulo slots
            print(storage.get_cluster_slots())

        elif command == "get total nodes":
            print(storage.get_total_nodes())

    # End Qumulo ###################################################
    # Start Azure Netapp ###########################################
    if cluster_type == "aznetapp":
        # Validate the subscription ID and environment before initializing
        if subid == "false":
            final_return(False, "Invalid subscription ID.")
        if os.getenv("COMPUTE_SITE") != "US_EUS_PD":
            final_return(False, f"Wrong environment: {os.getenv('COMPUTE_SITE')}")

        # Initialize aznetapp class
        storage = Aznetapp(subid)

        if command == "get aznetapp pools":
            print(storage.check_netapp_pools())

        # Dispatch the existing disk-usage report; the "get disk usage" command
        # name is an assumption (the method was previously unreachable)
        elif command == "get disk usage":
            print(storage.gen_disk_usage_tsv())
    # End Azure Netapp #############################################

    # Function calls supported by all classes #################################################################
    if cluster_type != "aznetapp":
        if command == "get used space":
            print(storage.get_used_space())
        elif command == "get total space":
            print(storage.get_total_space())
        elif command == "get used percentage":
            print(storage.get_used_percentage())
        elif command == "get available space":
            print(storage.get_available_space())
        elif command == "get percentage up":
            print(storage.get_percentage_up())
        elif command == "get quotas":
            print(storage.get_quotas())

    # If an unrecognized aznetapp command was given, fail out
    # (previously this else fired even after a successful aznetapp command)
    elif command not in ("get aznetapp pools", "get disk usage"):
        final_return(False, f"{command} not found for cluster type {cluster_type}.")
