mirror of https://github.com/lopes/netbox-scanner.git

Merge branch 'master' into improve_nmap
This commit is contained in: c7ee9b1b77

nbs/__init__.py | 269
@@ -1,4 +1,10 @@
import configparser
import logging
import pynetbox.models.dcim
import requests
import docker
import ipaddress
from collections import deque

from pynetbox import api
@@ -112,3 +118,266 @@ class NetBoxScanner(object):

        return True

    def init_docker(self, dockerDef: configparser.SectionProxy):
        # make sure the configured cluster type exists in NetBox before syncing
        ctype = dockerDef.get('cluster_type')
        clusterType = self.netbox.virtualization.cluster_types.get(name=ctype)
        if clusterType is None:
            self.netbox.virtualization.cluster_types.create(name=ctype, slug=ctype)

    def sync_docker(self, dockerConf: dict[str, str], dockerDef: configparser.SectionProxy):
        # mirror the containers of one Docker endpoint into NetBox clusters, VMs,
        # interfaces, IP addresses and services
        try:
            deviceName = dockerConf['device']
            devices = self.netbox.dcim.devices.filter(name=deviceName)
            if len(devices) == 0:
                logging.error(f'No devices matched name {deviceName}')
                return False
            device = next(devices)
            site = device.site
            clusterName = f'Docker {device.name}'
            cluster = self.netbox.virtualization.clusters.get(name=clusterName)
            if cluster is None:
                logging.info(f'No cluster exists for device {deviceName}, creating...')
                clusterType = self.netbox.virtualization.cluster_types.get(name=dockerDef.get('cluster_type'))
                clusterParams = {
                    'name': clusterName,
                    'type': clusterType.id,
                    'status': 'active'
                }
                if site is not None:
                    clusterParams['site'] = site.id
                cluster = self.netbox.virtualization.clusters.create(**clusterParams)
                self.netbox.dcim.devices.update([
                    {
                        'id': device.id, 'cluster': cluster.id
                    }
                ])

            client = docker.DockerClient(base_url=dockerConf['host'])
            networks = client.networks.list()
            containers = client.containers.list()

            networkData = {}

            for container in containers:

                logging.info(f'Processing container: {container.name}')

                vmName = 'Docker Standalone'
                # is it standalone or part of a compose project?
                composed = 'com.docker.compose.config-hash' in container.labels
                composeProject = None
                if composed:
                    composeProject = container.labels['com.docker.compose.project']
                    vmName = f'Docker Compose {composeProject}'
                vm = self.netbox.virtualization.virtual_machines.get(name=vmName, cluster_id=cluster.id)

                if vm is None:
                    vm = self.netbox.virtualization.virtual_machines.create(
                        name=vmName,
                        status='active',
                        cluster=cluster.id,
                        device=device.id,
                        site=site.id
                    )
                    logging.info(f'Created missing VM {vmName} with ID {vm.id}')
                # if composeProject is None:
                #     # it's a bridge
                #     if 'bridge' not in networkData:
                #         net = self.docker_upsert_network(device, nw)
                #         networkData['bridge'] = net

                containerNetworks = []
                containerNetwork = None
                ips = []
                hasExternalIp = False
                ns = container.attrs.get('NetworkSettings')
                if ns is not None:
                    for networkName in ns['Networks']:
                        if networkName not in networkData:
                            for nw in networks:
                                if nw.name == networkName:
                                    net = self.docker_upsert_network(device, nw)
                                    networkData[networkName] = net
                                    containerNetwork = net
                                    break
                        else:
                            containerNetwork = networkData[networkName]
                        containerNetworks.append({'netNetwork': containerNetwork, 'netContainer': ns['Networks'][networkName]})
                        if 'external' in containerNetwork:
                            hasExternalIp = True

                for nets in containerNetworks:

                    interfaceName = f'compose_{composeProject}' if composed and nets['netNetwork']['name'] != 'bridge' else 'bridge'
                    intParams = {
                        'virtual_machine_id': vm.id,
                        'name': interfaceName
                    }
                    if nets['netNetwork']['vrf'] is not None:
                        intParams['vrf_id'] = nets['netNetwork']['vrf'].id
                    interface = self.netbox.virtualization.interfaces.get(**intParams)
                    if interface is None:
                        # the create endpoint expects different parameter names than the lookup
                        del intParams['virtual_machine_id']
                        intParams['virtual_machine'] = vm.id
                        intParams.pop('vrf_id', None)
                        if nets['netNetwork']['vrf'] is not None:
                            intParams['vrf'] = nets['netNetwork']['vrf'].id
                        interface = self.netbox.virtualization.interfaces.create(**intParams)
                        logging.info(f'Created missing virtual interface {interfaceName} for VM {vm.id}')

                    containerIp = nets['netContainer']['IPAddress']
                    if containerIp == '' and nets['netNetwork']['name'] == 'host':
                        if nets['netNetwork']['external'] is None:
                            logging.info('Cannot process container: it uses the host network but no external IP could be determined')
                            continue
                        containerIp = nets['netNetwork']['external'].split('/')[0]
                    ipLookupParams = {
                        'address': f'{containerIp}/32',
                        # 'vminterface_id': vm.id
                    }
                    if nets['netNetwork']['vrf'] is not None:
                        ipLookupParams['vrf_id'] = nets['netNetwork']['vrf'].id
                        ipLookupParams['vminterface_id'] = vm.id
                    ip = self.netbox.ipam.ip_addresses.get(**ipLookupParams)
                    if ip is None:
                        ipCreateParams = {
                            'address': f'{containerIp}/32',
                        }
                        if nets['netNetwork']['vrf'] is not None:
                            ipCreateParams['assigned_object_type'] = 'virtualization.vminterface'
                            ipCreateParams['assigned_object_id'] = vm.id
                            ipCreateParams['vrf'] = nets['netNetwork']['vrf'].id
                        # address=f'{containerIp}/32', vrf=nets['netNetwork']['vrf'].id, assigned_object_type='virtualization.vminterface', assigned_object_id=interface.id
                        ip = self.netbox.ipam.ip_addresses.create(**ipCreateParams)
                        logging.info(f'Created missing IP {containerIp} on {interfaceName} interface')
                    ips.append(ip)

                tcp = False
                ports = []
                ipIds = []

                for containerPortDesc in container.ports:
                    if 'tcp' in containerPortDesc:
                        tcp = True
                    portList = container.ports[containerPortDesc]
                    if portList is not None:
                        ports.append(portList[0]['HostPort'])

                for ip in ips:
                    ipIds.append(ip.id)

                serviceName = container.name
                service = self.netbox.ipam.services.get(name=serviceName, virtual_machine_id=vm.id)
                if service is None:
                    serviceDescription = None
                    if 'org.opencontainers.image.title' in container.labels:
                        serviceDescription = container.labels['org.opencontainers.image.title']
                    if 'org.opencontainers.image.description' in container.labels:
                        if serviceDescription is not None:
                            serviceDescription = f'{serviceDescription} - {container.labels["org.opencontainers.image.description"]}'
                        else:
                            serviceDescription = container.labels['org.opencontainers.image.description']

                    serviceParams = {
                        'name': serviceName,
                        'virtual_machine': vm.id,
                        'ipaddresses': ipIds
                    }
                    if serviceDescription is not None:
                        # https://stackoverflow.com/a/2872519/1469797
                        serviceParams['description'] = (serviceDescription[:50] + '..') if len(serviceDescription) > 50 else serviceDescription
                    if len(ports) > 0:
                        serviceParams['ports'] = ports
                        serviceParams['protocol'] = 'tcp' if tcp else 'udp'
                    else:
                        serviceParams['ports'] = [1]
                        serviceParams['protocol'] = 'tcp'

                    # if serviceDescription is not None:
                    #     service = self.netbox.ipam.services.create(name=serviceName, virtual_machine=vm.id, description=serviceDescription, ipaddresses=ipIds, ports=ports, protocol='tcp' if tcp else 'udp')
                    # else:
                    #     service = self.netbox.ipam.services.create(name=serviceName, virtual_machine=vm.id, ipaddresses=ipIds, ports=ports, protocol='tcp' if tcp else 'udp')
                    service = self.netbox.ipam.services.create(**serviceParams)
                    logging.info(f'Created missing service {service} on VM {vm.id}')
                else:

                    serviceUpdateParams = {
                        'ipaddresses': ipIds,
                        'id': service.id
                    }
                    if len(ports) > 0:
                        serviceUpdateParams['ports'] = ports
                        serviceUpdateParams['protocol'] = 'tcp' if tcp else 'udp'

                    # update addresses and ports
                    self.netbox.ipam.services.update([
                        serviceUpdateParams
                    ])
                    logging.info(f'Updated addresses and ports for service {service} on VM {vm.id}')

        except ValueError as e:
            logging.error(e)
            return False

    # def docker_upsert_vm(self, cluster, device: pynetbox.models.dcim.Devices):

    def docker_upsert_network(self, device: pynetbox.models.dcim.Devices, d_network: docker.client.NetworkCollection.model):
        # look up or create the VRF and IP range that represent one Docker network
        primaryIp = None
        if device.primary_ip4 is not None:
            primaryIp = device.primary_ip4.address

        networkName = d_network.name
        driver = d_network.attrs.get('Driver')
        if networkName == 'host' or networkName == 'none' or driver == 'host':
            return {
                'name': networkName,
                'vrf': None,
                'range': None,
                'internal': primaryIp,
                'external': primaryIp
            }
        # deal with this later
        if driver != 'bridge':
            return {
                'name': networkName,
                'vrf': None,
                'range': None,
                'internal': None,
                'external': None
            }

        vrf = self.netbox.ipam.vrfs.get(name=f'Docker on {device.name}')
        if vrf is None:
            vrf = self.netbox.ipam.vrfs.create(name=f'Docker on {device.name}', rd=f'docker-{device.name}')
            logging.info(f'Created missing VRF for Docker on {device.name} with ID {vrf.id}')

        range = self.netbox.ipam.ip_ranges.get(description=networkName, vrf_id=vrf.id)
        if range is None:
            subnet = d_network.attrs.get('IPAM')['Config'][0]['Subnet']
            subnet_range = self.get_subnet_range(subnet)
            range = self.netbox.ipam.ip_ranges.create(
                description=networkName,
                vrf=vrf.id,
                start_address=f'{subnet_range[0].exploded}/32',
                end_address=f'{subnet_range[1].exploded}/32'
            )
            logging.info(f'Created missing IP range for Docker network {networkName} {range.start_address} => {range.end_address} in VRF {vrf.id}')

        return {
            'name': networkName,
            'vrf': vrf,
            'range': range,
            'internal': None,
            'external': primaryIp
        }
        # subnet = d_network.attrs.get('IPAM')['Config'][0]['Subnet']

    def get_subnet_range(self, subnet):
        # return the first and last usable host address of a subnet
        ip_range = ipaddress.ip_network(subnet)
        hosts = ip_range.hosts()
        firstHost = next(ip_range.hosts())

        # https://stackoverflow.com/a/48232574/1469797
        dd = deque(hosts, maxlen=1)
        lastHost = dd.pop()
        return [firstHost, lastHost]
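For reference, a standalone sketch of the first/last-host computation that get_subnet_range performs, using only the standard library; the subnet value is an illustrative Docker default, not something taken from this commit:

import ipaddress
from collections import deque

subnet = '172.17.0.0/16'                     # example Docker bridge subnet (assumption)
net = ipaddress.ip_network(subnet)
first = next(net.hosts())                    # 172.17.0.1
last = deque(net.hosts(), maxlen=1).pop()    # 172.17.255.254 -- exhaust the generator, keep the last item
print(f'{first.exploded}/32', f'{last.exploded}/32')   # the pair handed to ipam.ip_ranges.create()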
@@ -28,3 +28,13 @@ password =
unknown = autodiscovered:netbox-scanner
tag = prime
cleanup = yes

[Docker:firstSource]
host = unix:///var/run/docker.sock
device = MyNetboxDevice

[Docker:secondSource]
# using something like docker-socket-proxy
# with NETWORKS=1 CONTAINERS=1 POST=0
host = tcp://192.168.1.1:2375
device = MyOtherNetboxDevice
@@ -2,6 +2,8 @@

import logging
import sys
import docker
import ipaddress

from configparser import ConfigParser
from argparse import ArgumentParser

@@ -37,12 +39,22 @@ else:
    raise FileNotFoundError('Configuration file was not found.')

netbox = config['NETBOX']
tag = 'netbox-scanner'
cleanup = False
if argument == 'nmap':
    nmap = config['NMAP']
    tag = nmap['tag']
    cleanup = nmap.getboolean('cleanup')
if argument == 'netxms':
    netxms = config['NETXMS']
    tag = netxms['tag']
if argument == 'prime':
    prime = config['PRIME']
    tag = prime['tag']
# if argument == 'docker':
#     dockerConf = config['Docker']
#     tag = dockerConf['tag']
#     cleanup = dockerConf.getboolean('cleanup')

parser = ArgumentParser(description='netbox-scanner')
subparsers = parser.add_subparsers(title='Commands', dest='command')

@@ -53,6 +65,8 @@ if argument == 'netxms':
    argsp = subparsers.add_parser('netxms', help='NetXMS module')
if argument == 'prime':
    argsp = subparsers.add_parser('prime', help='Cisco Prime module')
if argument == 'docker':
    argsp = subparsers.add_parser('docker', help='Docker module')
args = parser.parse_args()

logfile = '{}/netbox-scanner-{}.log'.format(

@@ -101,14 +115,30 @@ def cmd_prime(s):  # prime handler
    h.run()  # set access_point=True to process APs
    s.sync(h.hosts)


def cmd_docker(s):  # docker handler

    if not config.has_section('dockerdef'):
        config.add_section('dockerdef')
    dockerDef = config['dockerdef']
    dockerDef.setdefault('cluster_type', 'Docker')
    dockerDef.setdefault('cluster_prefix', 'Docker')
    dockerDef.setdefault('vrf_prefix', 'Docker')
    s.init_docker(dockerDef)

    for section in config.sections():
        if not section.startswith('Docker:'):
            continue

        dockerSection = dict(config.items(section))
        s.sync_docker(dockerSection, dockerDef)


if __name__ == '__main__':
    scanner = NetBoxScanner(
        netbox['address'],
        netbox['token'],
        netbox['tls_verify'],
        tag,
        cleanup
    )

    if args.command == 'nmap':

@@ -121,5 +151,7 @@ if __name__ == '__main__':
        scanner.tag = prime['tag']
        scanner.cleanup = prime.getboolean('cleanup')
        cmd_prime(scanner)
    elif args.command == 'docker':
        cmd_docker(scanner)

    exit(0)
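Before a full run it can help to confirm that each configured Docker endpoint answers, using the same SDK client the scanner builds. A small sketch; the endpoint values simply mirror the sample config above and are not verified here:

import docker
from docker.errors import DockerException

for host in ('unix:///var/run/docker.sock', 'tcp://192.168.1.1:2375'):  # sample endpoints from the config
    try:
        client = docker.DockerClient(base_url=host)
        print(host, 'ok' if client.ping() else 'no response')
    except DockerException as exc:
        print(host, 'unreachable:', exc)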
@@ -6,3 +6,5 @@ requests==2.25.0
six==1.15.0
urllib3==1.26.2
python3-nmap==1.4.9
setuptools
docker