- 13th Jul 2024
- 20:40
- Admin
In this assignment, we delve into advanced SSH command execution on Ubuntu servers, building on basic skills to automate system checks across multiple machines. Tasked by a manager, we determine the hostname and check for files in the www directory on two servers, with a plan to scale the solution efficiently across a network of 100 servers; an illustrative sketch of such a script follows the checklist below. Achieve the following items:
- Make a flowchart.
- Create pseudocode.
- Make a single SSH connection via Paramiko and send one command to check the hostname and a second command to see whether there is anything in the www directory.
- Read the IP addresses from a file (text or CSV).
- Save the output of both commands to a file (text or CSV), making sure to note which IP each result came from.
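As a starting point for the scripting portion, the outline below shows one way the checks might be wired together with Paramiko. The file names servers.txt and results.csv, the placeholder credentials, and the /var/www path are illustrative assumptions, not fixed requirements of the assignment.
import csv
import paramiko

def check_server(ip, username, password):
    # Open a single SSH connection and run both commands over it.
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=ip, username=username, password=password, timeout=10)
    _, stdout, _ = client.exec_command('hostname')
    hostname = stdout.read().decode().strip()
    _, stdout, _ = client.exec_command('ls /var/www')  # assumed location of the www directory
    www_contents = stdout.read().decode().strip()
    client.close()
    return hostname, www_contents

def main():
    # servers.txt is assumed to contain one IP address per line.
    with open('servers.txt') as f:
        ips = [line.strip() for line in f if line.strip()]
    # Record which IP each result came from alongside the command output.
    with open('results.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['ip', 'hostname', 'www_contents'])
        for ip in ips:
            try:
                hostname, www_contents = check_server(ip, 'ubuntu', 'password')  # placeholder credentials
                writer.writerow([ip, hostname, www_contents or '(empty)'])
            except Exception as exc:
                writer.writerow([ip, 'ERROR', str(exc)])

if __name__ == '__main__':
    main()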
Scalable SSH Command Execution on Ubuntu Servers - Get Assignment Solution
Please note that this is a sample assignment solved by our Python Programmers. These solutions are intended for research and reference purposes only. If going through the report and code helps you learn the concepts, our Python Tutors will be very happy.
- Option 1 - To download the complete solution along with Code, Report and screenshots - Please visit our Programming Assignment Sample Solution page
- Option 2 - Reach out to our Python Tutors to get online tutoring related to this assignment and get your doubts cleared
- Option 3 - You can check the partial solution for this assignment in this blog below
Free Project Solution - Scalable SSH Command Execution on Ubuntu Servers
from internet import Internet
from typing import List
from bs4 import BeautifulSoup
import re
from queue import PriorityQueue, Queue
from collections import deque
class Parser:
    @staticmethod
    def get_links_in_page(html: str) -> List[str]:
        """
        In this method, we should parse a page's HTML and return a list of links in the page.
        Be sure not to return any link with a DISALLOWED character.
        All links should be of the form "/wiki/<page name>", as to not follow external links
        """
        links = []
        disallowed = Internet.DISALLOWED
        # Make sure your list doesn't have duplicates. Return the list in the same order as they appear in the HTML.
        # This function will be stress tested so make it efficient!
        soup = BeautifulSoup(html, 'html.parser')
        for tag in soup.find_all('a', href=re.compile(r'^/wiki/')):
            href = tag.get('href')
            # Skip disallowed links, image files and links that contain special characters.
            if href in disallowed:
                continue
            if href.split('.')[-1] in ('jpg', 'png', 'gif', 'svg'):
                continue
            if any(ch in href for ch in ('#', '?', '&', '=', '%', '-', ':', '//')):
                continue
            links.append(href)
        # Remove duplicates while preserving the order of first appearance.
        links = list(dict.fromkeys(links))
        # Save the list data in a text file (kept for inspection/debugging).
        with open('wikiracer.txt', 'w') as output:
            for row in links:
                output.write(str(row) + '\n')
        return links
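# Illustrative (not part of the assignment): on a hand-written snippet such as
#   Parser.get_links_in_page('<a href="/wiki/Apple">x</a> <a href="/wiki/Apple">y</a>')
# the parser should return ['/wiki/Apple'] - duplicates collapse and first-appearance order is kept,
# assuming neither entry trips Internet.DISALLOWED.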
# In these methods, we are given a source page and a goal page, and we should return
# the shortest path between the two pages. Be careful! Wikipedia is very large.
# These are all very similar algorithms, so it is advisable to make a global helper function that does all of the work, and have
# each of these call the helper with a different data type (stack, queue, priority queue, etc.)
class BFSProblem:
    def __init__(self):
        self.internet = Internet()
    # Example in/outputs:
    # bfs(source = "/wiki/Computer_science", goal = "/wiki/Computer_science") == ["/wiki/Computer_science", "/wiki/Computer_science"]
    # bfs(source = "/wiki/Computer_science", goal = "/wiki/Computation") == ["/wiki/Computer_science", "/wiki/Computation"]
    # Find more in the test case file.
    # Do not try to make fancy optimizations here. The autograder depends on you following standard BFS and will check all of the pages you download.
    # Links should be inserted into the queue as they are located in the page, and should be obtained using Parser's get_links_in_page.
    # Be very careful not to add things to the "visited" set of pages too early. You must wait for them to come out of the queue first.
    # See if you can figure out why.
    # This applies for bfs, dfs, and dijkstra's.
    # Download a page with self.internet.get_page().
    def bfs(self, source = "/wiki/Calvin_Li", goal = "/wiki/Wikipedia"):
        parent = {source: None}          # remembers the page on which each link was first found
        queue = Queue()
        queue.put(source)
        visited = set()
        while not queue.empty():
            current = queue.get()
            if current in visited:
                continue
            visited.add(current)         # pages are only marked visited once they leave the queue
            links = Parser.get_links_in_page(self.internet.get_page(current))
            for link in links:
                if link not in parent:
                    parent[link] = current
                if link == goal:
                    # Rebuild the path source -> ... -> current -> goal from the parent map.
                    path = [goal]
                    node = current
                    while node is not None:
                        path.append(node)
                        node = parent[node]
                    return path[::-1]
                if link not in visited:
                    queue.put(link)
        return None                      # if no path exists, return None
class DFSProblem:
    def __init__(self):
        self.internet = Internet()
    # Links should be inserted into a stack as they are located in the page. Do not add things to the visited list until they are taken out of the stack.
    def dfs(self, source = "/wiki/Calvin_Li", goal = "/wiki/Wikipedia"):
        parent = {source: None}
        stack = deque()
        stack.append(source)
        visited = set()
        while len(stack) > 0:
            current = stack.pop()
            if current in visited:
                continue
            visited.add(current)         # pages are only marked visited once they come off the stack
            links = Parser.get_links_in_page(self.internet.get_page(current))
            for link in links:
                if link not in parent:
                    parent[link] = current
                if link == goal:
                    # Rebuild the path source -> ... -> current -> goal from the parent map.
                    path = [goal]
                    node = current
                    while node is not None:
                        path.append(node)
                        node = parent[node]
                    return path[::-1]
                if link not in visited:
                    stack.append(link)
        return None                      # if no path exists, return None
class DijkstrasProblem:
    def __init__(self):
        self.internet = Internet()
    # Links should be inserted into the heap as they are located in the page.
    # By default, the cost of going to a link is the length of a particular destination link's name. For instance,
    # if we consider /wiki/a -> /wiki/ab, then the default cost function will have a value of 8.
    # This cost function is overridable and your implementation will be tested on different cost functions. Use costFn(node1, node2)
    # to get the cost of a particular edge.
    # You should return the path from source to goal that minimizes the total cost. Assume cost > 0 for all edges.
    def dijkstras(self, source = "/wiki/Calvin_Li", goal = "/wiki/Wikipedia", costFn = lambda x, y: len(y)):
        parent = {source: None}
        dist = {source: 0}               # cheapest known cost from the source to each page
        heap = PriorityQueue()
        heap.put((0, source))            # heap entries are (total cost from source, page)
        visited = set()
        while not heap.empty():
            cost, current = heap.get()
            if current in visited:
                continue                 # stale heap entry; a cheaper route was already processed
            visited.add(current)
            if current == goal:
                # Rebuild the cheapest path by following the parent links back to the source.
                path = [goal]
                while path[-1] != source:
                    path.append(parent[path[-1]])
                return path[::-1]
            links = Parser.get_links_in_page(self.internet.get_page(current))
            for link in links:
                new_cost = cost + costFn(current, link)
                if link not in dist or new_cost < dist[link]:
                    dist[link] = new_cost
                    parent[link] = current
                    heap.put((new_cost, link))
        return None                      # if no path exists, return None
class WikiracerProblem:
    def __init__(self):
        self.internet = Internet()
    # Time for you to have fun! Using what you know, try to efficiently find the shortest path between two wikipedia pages.
    # Your only goal here is to minimize the total amount of pages downloaded from the Internet, as that is the dominating time-consuming action.
    # Your answer doesn't have to be perfect by any means, but we want to see some creative ideas.
    # One possible starting place is to get the links in `goal`, and then search for any of those from the source page, hoping that those pages lead back to goal.
    # Note: a BFS implementation with no optimizations will not get credit, and it will suck.
    # You may find Internet.get_random() useful, or you may not.
    def wikiracer(self, source = "/wiki/Calvin_Li", goal = "/wiki/Wikipedia"):
        # Following the hint above: download the goal page once and remember its links.
        # While searching forward from the source, links that also appear on the goal page
        # are explored first, since such pages frequently link back to the goal.
        goal_links = set(Parser.get_links_in_page(self.internet.get_page(goal)))
        parent = {source: None}
        frontier = deque()
        frontier.append(source)
        visited = set()
        while len(frontier) > 0:
            current = frontier.popleft()
            if current in visited:
                continue
            visited.add(current)
            links = Parser.get_links_in_page(self.internet.get_page(current))
            for link in links:
                if link not in parent:
                    parent[link] = current
                if link == goal:
                    # Rebuild the path source -> ... -> current -> goal from the parent map.
                    path = [goal]
                    node = current
                    while node is not None:
                        path.append(node)
                        node = parent[node]
                    return path[::-1]
            # Promising links (those that also appear on the goal page) jump to the front of the queue.
            for link in links:
                if link in visited:
                    continue
                if link in goal_links:
                    frontier.appendleft(link)
                else:
                    frontier.append(link)
        return None                      # if no path exists, return None
# KARMA
class FindInPageProblem:
    def __init__(self):
        self.internet = Internet()
    # This Karma problem is a little different. In this, we give you a source page, and then ask you to make up some heuristics that will allow you to efficiently
    # find a page containing all of the words in `query`. Again, optimize for the fewest number of internet downloads, not for the shortest path.
    def find_in_page(self, source = "/wiki/Calvin_Li", query = ["ham", "cheese"]):
        path = [source]
        # find a path to a page that contains ALL of the words in query in any place within the page
        # path[-1] should be the page that fulfills the query.
        if len(query) == 0:
            return path                  # an empty query is already satisfied by the source page
        source_html = self.internet.get_page(source)
        if all(word in source_html for word in query):
            return path                  # the source page itself contains every query word
        # Otherwise, try the pages linked from the source, one download at a time.
        for link in Parser.get_links_in_page(source_html):
            try:
                html = self.internet.get_page(link)
            except Exception:
                continue                 # skip pages that fail to download
            if all(word in html for word in query):
                return path + [link]     # path[-1] is the page that fulfills the query
        return None
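For reference, a quick way to exercise the classes above might look like the following; the page names are just the defaults from the code, and the provided internet module (with its Internet class) is assumed to be importable.
if __name__ == "__main__":
    # Smoke test for the search strategies (each call downloads real Wikipedia pages).
    print(BFSProblem().bfs(source="/wiki/Calvin_Li", goal="/wiki/Wikipedia"))
    print(DFSProblem().dfs(source="/wiki/Calvin_Li", goal="/wiki/Wikipedia"))
    print(DijkstrasProblem().dijkstras(source="/wiki/Calvin_Li", goal="/wiki/Wikipedia"))
    print(WikiracerProblem().wikiracer(source="/wiki/Calvin_Li", goal="/wiki/Wikipedia"))
    print(FindInPageProblem().find_in_page(source="/wiki/Calvin_Li", query=["ham", "cheese"]))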
Get the best Scalable SSH Command Execution on Ubuntu Servers Assignment help and tutoring services from our experts now!
About The Author - Dr. Emily Parker
Dr. Emily Parker is adept in SSH connections, command execution, and file handling, specializing in network automation and system administration. With a focus on scaling solutions across multiple servers, Dr. Parker excels in efficient management and reporting using tools like Paramiko, ensuring robust scripts for streamlined operations and enhanced network monitoring capabilities.