1、paramiko模块
1.1、此函数通过ssh连接服务器并执行命令:
import paramikodef ssh2(ip='127.0.0.1',port=22,username='',passwd='',cmd=''): """ ssh连接服务器 :return: """ ssh = paramiko.SSHClient() #创建SSH对象 ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #允许连接不在know_hosts文件中的主机 ssh.connect(ip,int(port),username,passwd) #连接服务器 stdin,stdout,stderr = ssh.exec_command(cmd) #执行命令,标准输入,输出,错误三个变量中 result = stdout.read() #获取命令的结果,输出是字节的类型 print(result.decode("utf-8")) #将字节的类型转换为utf-8 ssh.close()ssh2("192.168.1.1",22,"root","123456","df")1.2、sftpclient:
import paramikotransport = paramiko.Transport(('192.168.1.1',22))transport.connect(username='root', password='123456')sftp = paramiko.SFTPClient.from_transport(transport)# 将location.py 上传至服务器 /tmp/test.py,其实test.py是更改过的名字sftp.put('/tmp/location.py', '/tmp/test.py')# 将remove_path 下载到本地 local_path,local_path也是更改过的名字sftp.get('remove_path', 'local_path')transport.close()2、进程进程就是所以资源的集合,是程序的一次执行活动,属于动态概念。2.1、多进程第一进程p父进程是主进程,第二个p1的父进程是p,轮询的啊
from multiprocessing import Process
import os


def info(title):
    """Print *title* plus the current module, parent pid and own pid."""
    print(title)
    print('module name:', __name__)
    print('parent process:', os.getppid())  # pid of the parent process
    print('process id:', os.getpid())       # pid of this process


def f(name):
    """Child-process entry point: report process info, then greet *name*."""
    info("\033[31;1mcalled from child process function f\033[0m")
    print('hello', name)


if __name__ == '__main__':
    # Both p and p1 are started from the main process, so the main process
    # is the parent of each.
    info('\033[32;1mmain process line\033[0m')
    p = Process(target=f, args=('Tom',))
    p.start()
    p1 = Process(target=f, args=('bob',))
    p1.start()
# 2.2、进程间通讯:两个进程之间的数据传递
# 队列方式:
from multiprocessing import Process, Queue


def f(q):
    """Child-process worker: put one payload onto the shared queue."""
    q.put([42, None, 'hello'])


if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    # The parent reads what the child produced -- the queue is the
    # middleman that makes inter-process communication work.
    print(q.get())
# Pipe方式:
from multiprocessing import Process, Pipe

# A Pipe behaves like a private two-way channel between the parent and the
# child process; each side holds one connection endpoint.


def f(conn):
    """Child side: send two payloads, echo one message from the parent, close."""
    conn.send([42, None, 'hello'])
    conn.send([42, None, 'hello world'])
    print("from parent:", conn.recv())
    conn.close()


if __name__ == '__main__':
    # The parent and child talk through the two ends of the pipe.
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print(parent_conn.recv())
    print(parent_conn.recv())
    parent_conn.send("大家可好")
    p.join()
2.3、进程间处理同一份数据,即共享数据:Manager
from multiprocessing import Process, Manager
import os


def f(d, l):
    """Worker run by every child: mutate the shared dict and list in place."""
    d[1] = '1'
    d['2'] = 2
    l.append(os.getpid())   # each child records its own pid
    print(l, d)


if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()          # dict that multiple processes can share
        l = manager.list(range(5))  # list that multiple processes can share
        workers = []
        for _ in range(10):         # ten processes work on the same data
            p = Process(target=f, args=(d, l))
            p.start()
            workers.append(p)
        # Wait for every worker; otherwise the main process could exit
        # before the children have finished updating the shared data.
        for p in workers:
            p.join()
        l.append("from parent")
        print(d)
        print(l)
2.4、进程池:限制同时运行的进程个数,因为进程开销太大,所以设置有进程池,其它不执行的就挂起
from multiprocessing import Process, Pool
import os, time


def Foo(i):
    """Pool worker: sleep two seconds, report its pid, return i + 100."""
    time.sleep(2)
    print("in process", os.getpid())
    return i + 100


def Bar(arg):
    """Callback run back in the main process; *arg* is Foo's return value."""
    print("--->exec done:", arg, os.getpid())


if __name__ == '__main__':
    pool = Pool(processes=3)   # only 3 workers run at once; the rest wait
    print("主进程", os.getpid())
    for i in range(10):
        # apply_async submits without blocking; Bar receives each result
        # as it completes.  (pool.apply would run the tasks serially.)
        pool.apply_async(func=Foo, args=(i,), callback=Bar)
    print('end')
    pool.close()
    # Wait for every pooled task; without join() the program would exit
    # before the workers finish.
    pool.join()
3、线程线程是操作系统能够进行运算调度的最小单位。一条线程指的是进程中一个单一顺序的控制流,一个进程中可以并发多个线程,每条线程并行执行不同的任务3.1、创建线程:
import threadingimport timedef sayhi(num): print("running on number:%s" %num) time.sleep(3) print("task done",num)#线程是并行处理的,看是是并行处理,时分处理if __name__ == '__main__': t1 = threading.Thread(target=sayhi,args=(1,)) #创建线程,arg是传入变量,必须是元组格式 t2 = threading.Thread(target=sayhi,args=(2,)) t1.start() #启动线程 t2.start() print(t1.getName()) #获取线程名称 print(t2.getName())3.2、创建多个线程
import threadingimport timedef sayhi(num): print("running on number:%s" %num) time.sleep(3) print("task done",num)#线程是并行处理的,看是是并行处理,时分处理if __name__ == '__main__': t_objs = [] #存线程实例 for i in range(50): t = threading.Thread(target=sayhi,args=("t-%s" %i,)) t.start() t.join() #变成串行执行,只有第一个执行完才能执行第二个 t_objs.append(t)
for t in t_objs: t.join() #wait等待,保障有序的执行
print("main thread......") #函数式执行顺序3.3、继承式的线程方式
import threadingimport timeclass MyThread(threading.Thread): def __init__(self,n,sleep_time): super(MyThread,self).__init__() self.n = n self.sleep_time = sleep_time def run(self): #定义每个线程要运行的函数 print("running task",self.n) time.sleep(self.sleep_time) print("task done",self.n)t1 = MyThread("t1",2)t2 = MyThread("t2",4)t1.start()t2.start()t1.join() #wait,保障有序的执行t2.join()print("main thread......") #函数式执行顺序3.4、守护进程,随着主线程的结束而结束
import threading
import time


def run(n):
    """Daemon worker: print, sleep three seconds, print again (may be killed early)."""
    print("task", n)
    time.sleep(3)
    print("task done", n, threading.current_thread())


start_time = time.time()
t_objs = []   # keep every Thread instance
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    # Fixed: setDaemon(True) is deprecated since Python 3.10 -- assign the
    # .daemon attribute instead.  A daemon thread dies when the main
    # thread exits.
    t.daemon = True
    t.start()
    t_objs.append(t)

# Deliberately no join here: the main thread does not wait for daemon
# threads, so it reaches the prints below immediately and the daemons are
# killed with it.  threading.active_count() is the number of live threads.
print("-------all threads has finished...", threading.current_thread(), threading.active_count())
print("cost:", time.time() - start_time)
3.5、互斥锁一个进程下可以启动多个线程,多个线程共享父进程的内存,时分复用时,线程可以拿到一份数据进行修改,所以导致多个线程可以处理同时处理一个数据,导致处理出来的数据出现错误,不过在python3.x中已经解决了
import threading
import time


def run(n):
    """Increment the shared counter while holding the mutex."""
    # Holding the lock across the sleep serializes the five threads, so
    # the whole demo takes about five seconds.
    lock.acquire()
    global num
    num += 1
    time.sleep(1)
    lock.release()


lock = threading.Lock()
num = 0
t_objs = []   # collect the thread instances
for i in range(5):
    t = threading.Thread(target=run, args=("t-%s" % i,))
    t.start()
    # Do not join here -- that would block the start of the later threads.
    # Collect them and join once all five have been launched.
    t_objs.append(t)

for t in t_objs:   # wait until every thread has finished
    t.join()

print("----------all threads has finished...", threading.current_thread(), threading.active_count())
print("num:", num)   # takes roughly five seconds because of the lock + sleep
3.6、信号量: 就是同时 运行执行多少个线程
import threading,timedef run(n): semaphore.acquire() #这里必须先获取 time.sleep(1) print("run the thread: %s\n" %n) semaphore.release() #后释放t_objs = []if __name__ == '__main__': semaphore = threading.BoundedSemaphore(5) #每次只允许同时运行五个线程 for i in range(50): t = threading.Thread(target=run,args=(i,)) t.start() t_objs.append(t) for t in t_objs: t.join()while threading.active_count() != 1: passelse: print('----all threads done---')3.6、event事件event = threading.Event()event.wait() 等待event.set() 设置置位为1event.clear() 清除置位红绿灯示例:
import threading, time
import random

event = threading.Event()   # shared flag: set = green light, cleared = red


def light():
    """Traffic light: roughly 5s green, 5s red, cycling forever (never returns)."""
    count = 0
    if not event.is_set():
        event.set()   # start on green (flag set to 1)
    while True:
        if count > 5 and count < 10:
            event.clear()   # clear the flag: the light turns red
            print("\033[41;1mred light is on....\033[0m")
        elif count >= 10:
            event.set()     # set the flag again: back to green
            count = 0
        else:
            print('\033[42;1mgreen light is on...\033[0m')
        time.sleep(1)
        count += 1


def car(name):
    """A car: drives while the light is green, blocks on event.wait() when red."""
    while True:
        time.sleep(random.randrange(10))   # random pause between checks
        if event.is_set():
            print("[%s] running..." % name)
        else:
            print("[%s] sees red light,waiting...." % name)
            event.wait()   # block until the light turns green again


if __name__ == '__main__':
    # Fixed: the original bound the Thread object to the name `light`,
    # shadowing the light() function defined above.
    light_thread = threading.Thread(target=light)
    light_thread.start()
    for i in range(3):   # three cars
        t = threading.Thread(target=car, args=(i,))
        t.start()
3.7、队列
- class queue.Queue(maxsize=0) #先入先出
- class queue.LifoQueue(maxsize=0) #last in first out
- class queue.PriorityQueue(maxsize=0) #存储数据时可设置优先级的队列
import queue

# Entries are (priority, payload) tuples; get() always returns the entry
# with the smallest priority first, so negatives come out before positives.
q = queue.PriorityQueue()
for entry in ((-1, "test1"), (3, "test2"), (10, "test4"), (6, "test3")):
    q.put(entry)
for _ in range(4):
    print(q.get())
#按照左边的数字排列,从小到大排列,负数也算
3.8、生产者消费者模型
import threading,timeimport queueq = queue.Queue(maxsize=10) #队列里最多只能放10个def Producer(name): count = 1 while True: q.put("骨头%s" %count) print("生产了骨头",name) count +=1 time.sleep(2)def Consumer(name): # while q.qsize()>0: #不能这样设置,因为第一空的时候,导致跳出循环,就再也不会执行了 while True: # print(q.get()) print("[%s] 取到[%s] 并且吃了它...." %(name,q.get())) time.sleep(1)p = threading.Thread(target=Producer,args=("longlong",))c = threading.Thread(target=Consumer,args=("Dog_1",))c1 = threading.Thread(target=Consumer,args=("Dog_2",))p.start()c.start()c1.start()吃包子:
import time, random
import queue, threading

q = queue.Queue()


def Producer(name):
    """Put the numbers 0..19 on the queue, pausing 0-2 seconds between items."""
    count = 0
    while count < 20:
        time.sleep(random.randrange(3))
        q.put(count)
        print('Producer %s has produced %s baozi..' % (name, count))
        count += 1


def Consumer(name):
    """Make 20 attempts to take an item off the queue, 0-3 seconds apart."""
    count = 0
    while count < 20:
        time.sleep(random.randrange(4))
        if not q.empty():
            # Fixed: the assignment of `data` had been commented out while
            # the print below still used it, raising NameError on the
            # first successful consumption.
            data = q.get()
            print('\033[32;1mConsume %s has eat %s baozi...\033[0m' % (name, data))
        else:
            print("-----no baozi anymore----")
        count += 1


p1 = threading.Thread(target=Producer, args=('A',))
c1 = threading.Thread(target=Consumer, args=('B',))
p1.start()
c1.start()
来源:https://www.cnblogs.com/qianyuyu/p/10210470.html