py3_cookbook_notes_03
Concurrent Programming
Starting and Stopping Threads
# Code to execute in an independent thread
import time

def countdown(n):
    while n > 0:
        print('T-minus', n)
        n -= 1
        time.sleep(5)

# Create and launch a thread
from threading import Thread
t = Thread(target=countdown, args=(10,))
t.start()

if t.is_alive():
    print('Still running')
else:
    print('Completed')

t.join()

# Daemon status must be set before start(); a daemon thread is
# destroyed automatically when the main thread exits:
# t = Thread(target=countdown, args=(10,), daemon=True)
If a thread performs blocking operations such as I/O, terminating it by polling a flag becomes tricky to coordinate: a thread that blocks indefinitely on an I/O operation may never return to check whether it has been asked to stop. To handle this correctly, you need to carefully structure the thread around a loop with timeouts. For example:
import socket

class IOTask:
    def __init__(self):
        self._running = True

    def terminate(self):
        self._running = False

    def run(self, sock):
        # sock is a socket
        sock.settimeout(5)    # Set timeout period
        while self._running:
            # Perform a blocking I/O operation w/ timeout
            try:
                data = sock.recv(8192)
                break
            except socket.timeout:
                continue
        # Continued processing
        ...
        # Terminated
        return
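A minimal usage sketch (sock here is assumed to be an already-connected socket): run the task in its own thread, then call terminate() to request a stop; the recv() timeout guarantees the _running flag is rechecked every few seconds.

from threading import Thread

task = IOTask()
t = Thread(target=task.run, args=(sock,))   # sock: an already-connected socket (assumed)
t.start()
...
task.terminate()    # Request shutdown; takes effect within the timeout period
t.join()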
Determining Whether a Thread Has Started
from threading import Thread, Event
import time

# Code to execute in an independent thread
def countdown(n, started_evt):
    print('countdown starting')
    started_evt.set()
    while n > 0:
        print('T-minus', n)
        n -= 1
        time.sleep(5)

# Create the event object that will be used to signal startup
started_evt = Event()

# Launch the thread and pass the startup event
print('Launching countdown')
t = Thread(target=countdown, args=(10,started_evt))
t.start()

# Wait for the thread to start
started_evt.wait()
print('countdown is running')
The wait/notify mechanism of Condition:
from threading import Condition, Thread
import time
import random

num = 0
cv = Condition()

def decr_num():
    global num
    while True:
        while num <= 0:
            with cv:
                cv.wait(3)    # Block until notified (or 3-second timeout)
        num = num - 1
        time.sleep(random.randint(0, 3))

def incr_num():
    global num
    while True:
        time.sleep(random.randint(0, 3))
        num = num + 1
        with cv:
            cv.notify_all()   # Wake all waiting threads

def print_num():
    while True:
        print(num)
        time.sleep(1)

if __name__ == '__main__':
    decr_thread = Thread(target=decr_num)
    decr_thread.start()
    incr_thread = Thread(target=incr_num)
    incr_thread.start()
    print_thread = Thread(target=print_num)
    print_thread.start()
Semaphores:
from threading import Semaphore, Thread

# Worker thread
def worker(n, sema):
    # Wait to be signaled
    sema.acquire()
    try:
        # Do some work
        print('Working', n)
    finally:
        sema.release()

# Create some threads
sema = Semaphore(3)
nworkers = 10
for n in range(nworkers):
    t = Thread(target=worker, args=(n, sema,))
    t.start()
Communicating Between Threads
Perhaps the safest way to send data from one thread to another is to use a Queue from the queue library: create a Queue object that is shared by multiple threads, which then add or remove items from it using put() and get() operations. For example:
from queue import Queue
from threading import Thread

# A thread that produces data
def producer(out_q):
    while True:
        # Produce some data
        ...
        out_q.put(data)

# A thread that consumes data
def consumer(in_q):
    while True:
        # Get some data
        data = in_q.get()
        # Process the data
        ...

# Create the shared queue and launch both threads
q = Queue()
t1 = Thread(target=consumer, args=(q,))
t2 = Thread(target=producer, args=(q,))
t1.start()
t2.start()
When using queues, coordinating the shutdown of producers and consumers can be somewhat tricky. A common solution is to place a special sentinel value on the queue; when the consumer reads it, it terminates. For example:
from queue import Queue
from threading import Thread

# Object that signals shutdown
_sentinel = object()

# A thread that produces data
def producer(out_q):
    while running:
        # Produce some data
        ...
        out_q.put(data)

    # Put the sentinel on the queue to indicate completion
    out_q.put(_sentinel)

# A thread that consumes data
def consumer(in_q):
    while True:
        # Get some data
        data = in_q.get()

        # Check for termination
        if data is _sentinel:
            in_q.put(_sentinel)
            break

        # Process the data
        ...
Thread communication with a queue is a one-way and nondeterministic process: in general, you have no way of knowing when the receiving thread has actually received a piece of data and started working on it. However, Queue objects do provide some basic completion features, such as the task_done() and join() shown in this example:
from queue import Queue
from threading import Thread

# A thread that produces data
def producer(out_q):
    while running:
        # Produce some data
        ...
        out_q.put(data)

# A thread that consumes data
def consumer(in_q):
    while True:
        # Get some data
        data = in_q.get()
        # Process the data
        ...
        # Indicate completion
        in_q.task_done()

# Create the shared queue and launch both threads
q = Queue()
t1 = Thread(target=consumer, args=(q,))
t2 = Thread(target=producer, args=(q,))
t1.start()
t2.start()

# Wait for all produced items to be consumed
q.join()
If a thread needs to know immediately when a consumer thread has processed a particular item of data, pair the data with an Event; the producer can then monitor its progress through that Event object. For example:
from queue import Queue
from threading import Thread, Event

# A thread that produces data
def producer(out_q):
    while running:
        # Produce some data
        ...
        # Make a (data, event) pair and hand it to the consumer
        evt = Event()
        out_q.put((data, evt))
        ...
        # Wait for the consumer to process the item
        evt.wait()

# A thread that consumes data
def consumer(in_q):
    while True:
        # Get some data
        data, evt = in_q.get()
        # Process the data
        ...
        # Indicate completion
        evt.set()
Locking Critical Sections
import threading

class SharedCounter:
    '''
    A counter object that can be shared by multiple threads.
    '''
    def __init__(self, initial_value = 0):
        self._value = initial_value
        self._value_lock = threading.Lock()

    def incr(self, delta=1):
        '''
        Increment the counter with locking
        '''
        with self._value_lock:
            self._value += delta

    def decr(self, delta=1):
        '''
        Decrement the counter with locking
        '''
        with self._value_lock:
            self._value -= delta
Locking with Deadlock Avoidance
import threading
from contextlib import contextmanager

# Thread-local state to store information on locks already acquired
_local = threading.local()

@contextmanager
def acquire(*locks):
    # Sort locks by object identifier
    locks = sorted(locks, key=lambda x: id(x))

    # Make sure lock order of previously acquired locks is not violated
    acquired = getattr(_local, 'acquired', [])
    if acquired and max(id(lock) for lock in acquired) >= id(locks[0]):
        raise RuntimeError('Lock Order Violation')

    # Acquire all of the locks
    acquired.extend(locks)
    _local.acquired = acquired
    try:
        for lock in locks:
            lock.acquire()
        yield
    finally:
        # Release locks in reverse order of acquisition
        for lock in reversed(locks):
            lock.release()
        del acquired[-len(locks):]
import threading
x_lock = threading.Lock()
y_lock = threading.Lock()

def thread_1():
    while True:
        with acquire(x_lock, y_lock):
            print('Thread-1')

def thread_2():
    while True:
        with acquire(y_lock, x_lock):
            print('Thread-2')

t1 = threading.Thread(target=thread_1)
t1.daemon = True
t1.start()

t2 = threading.Thread(target=thread_2)
t2.daemon = True
t2.start()
The key idea behind this deadlock avoidance is that simply acquiring locks in increasing order of object id cannot produce a cyclic dependency between threads, and a cyclic dependency is a necessary condition for deadlock, so the program can never enter a deadlocked state.
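As a stress test of this scheme, the classic dining philosophers problem can be written in terms of the acquire() function above (a sketch; each chopstick is modeled as a lock):

import threading

# The philosopher thread
def philosopher(left, right):
    while True:
        with acquire(left, right):
            print(threading.current_thread(), 'eating')

# The chopsticks (represented by locks)
NSTICKS = 5
chopsticks = [threading.Lock() for n in range(NSTICKS)]

# Create all of the philosophers
for n in range(NSTICKS):
    t = threading.Thread(target=philosopher,
                         args=(chopsticks[n], chopsticks[(n+1) % NSTICKS]))
    t.start()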
Storing Thread-Specific State
from socket import socket, AF_INET, SOCK_STREAM
import threading

class LazyConnection:
    def __init__(self, address, family=AF_INET, type=SOCK_STREAM):
        self.address = address
        self.family = family
        self.type = type
        self.local = threading.local()

    def __enter__(self):
        if hasattr(self.local, 'sock'):
            raise RuntimeError('Already connected')
        self.local.sock = socket(self.family, self.type)
        self.local.sock.connect(self.address)
        return self.local.sock

    def __exit__(self, exc_ty, exc_val, tb):
        self.local.sock.close()
        del self.local.sock

from functools import partial

def test(conn):
    with conn as s:
        s.send(b'GET /index.html HTTP/1.0\r\n')
        s.send(b'Host: www.python.org\r\n')
        s.send(b'\r\n')
        resp = b''.join(iter(partial(s.recv, 8192), b''))

    print('Got {} bytes'.format(len(resp)))

if __name__ == '__main__':
    conn = LazyConnection(('www.python.org', 80))
    t1 = threading.Thread(target=test, args=(conn,))
    t2 = threading.Thread(target=test, args=(conn,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
Creating a Thread Pool
Python's built-in concurrent.futures.ThreadPoolExecutor does not even bound the queue of submitted tasks; fortunately it is fairly easy to extend:
from concurrent.futures.thread import ThreadPoolExecutor
from threading import Lock, Condition
import time

class MaxPendingThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self, *args, max_pending=0, **kwargs):
        super().__init__(*args, **kwargs)
        self._pending_count = 0
        self._max_pending = max_pending
        self._count_lock = Lock()
        self._cv = Condition()

    def _decr_pending_count(self):
        with self._count_lock:
            self._pending_count -= 1
        with self._cv:
            self._cv.notify_all()

    def submit(self, fn, *args, **kwargs):
        # Block while too many tasks are already pending
        while self._max_pending > 0 and self._pending_count >= self._max_pending:
            with self._cv:
                self._cv.wait(2)
        f = super().submit(fn, *args, **kwargs)
        with self._count_lock:
            self._pending_count += 1
        f.add_done_callback(lambda _: self._decr_pending_count())
        return f

def do_work(n):
    time.sleep(5)
    print(n)

if __name__ == '__main__':
    with MaxPendingThreadPoolExecutor(max_workers=2, max_pending=10) as executor:
        for n in range(50):
            executor.submit(do_work, n)
        executor.shutdown()   # Redundant inside 'with', which shuts down on exit
Simple Parallel Programming
Suppose you have a program that performs CPU-intensive work and you want to make it run faster by taking advantage of multiple cores. The concurrent.futures library provides a ProcessPoolExecutor class that can be used to execute computationally intensive functions in separately running Python interpreters. Typical usage of ProcessPoolExecutor looks like this:
from concurrent.futures import ProcessPoolExecutor

with ProcessPoolExecutor() as pool:
    ...
    # do work in parallel using pool
    ...
Under the covers, a ProcessPoolExecutor creates N independent Python interpreters, where N is the number of CPUs available on the system; you can change the number of workers by supplying an optional argument, as in ProcessPoolExecutor(N). The pool runs until the last statement in the with block executes, at which point it is shut down, though the program will wait until all submitted work has been processed.
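As a concrete sketch (the work() function here is illustrative, not from the original notes), work is handed to the pool either with map() or with submit(), which returns a Future:

from concurrent.futures import ProcessPoolExecutor

def work(x):
    # Placeholder for a CPU-bound computation
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor() as pool:
        # map() applies the function across an iterable, in parallel
        results = list(pool.map(work, range(10)))

        # submit() schedules a single call and returns a Future
        future = pool.submit(work, 12)
        print(future.result())   # Blocks until the result is ready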
Python's Global Interpreter Lock (GIL) Problem
# Processing pool (see below for initialization)
pool = None

# Performs a large calculation (CPU bound)
def some_work(args):
    ...
    return result

# A thread that calls the above function
def some_thread():
    while True:
        ...
        r = pool.apply(some_work, (args,))
        ...

# Initialize the pool
if __name__ == '__main__':
    import multiprocessing
    pool = multiprocessing.Pool()
This works around the GIL with a trick that uses a process pool. Whenever a thread wants to perform CPU-intensive work, it hands the work to the pool, which runs it in a separate Python interpreter in another process. While the thread waits for the result, it releases the GIL; and because the computation is carried out in a separate interpreter, it is not bound by the GIL at all. On a multicore system, you will find that this technique makes good use of the available CPUs.
Defining an Actor Task
from queue import Queue
from threading import Thread, Event

# Sentinel used for shutdown
class ActorExit(Exception):
    pass

class Actor:
    def __init__(self):
        self._mailbox = Queue()

    def send(self, msg):
        '''
        Send a message to the actor
        '''
        self._mailbox.put(msg)

    def recv(self):
        '''
        Receive an incoming message
        '''
        msg = self._mailbox.get()
        if msg is ActorExit:
            raise ActorExit()
        return msg

    def close(self):
        '''
        Close the actor, thus shutting it down
        '''
        self.send(ActorExit)

    def start(self):
        '''
        Start concurrent execution
        '''
        self._terminated = Event()
        t = Thread(target=self._bootstrap)
        t.daemon = True
        t.start()

    def _bootstrap(self):
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()

    def join(self):
        self._terminated.wait()

    def run(self):
        '''
        Run method to be implemented by the user
        '''
        while True:
            msg = self.recv()

# Sample ActorTask
class PrintActor(Actor):
    def run(self):
        while True:
            msg = self.recv()
            print('Got:', msg)

# Sample use
p = PrintActor()
p.start()
p.send('Hello')
p.send('World')
p.close()
p.join()
You can send tagged messages in the form of tuples and have the actor dispatch to different operations, like this:
class TaggedActor(Actor):
    def run(self):
        while True:
            tag, *payload = self.recv()
            getattr(self, 'do_' + tag)(*payload)

    # Methods corresponding to different message tags
    def do_A(self, x):
        print('Running A', x)

    def do_B(self, x, y):
        print('Running B', x, y)

# Example
a = TaggedActor()
a.start()
a.send(('A', 1))      # Invokes do_A(1)
a.send(('B', 2, 3))   # Invokes do_B(2,3)
A variation allows arbitrary functions to be run in a worker, returning results through a special Result object:
from threading import Event

class Result:
    def __init__(self):
        self._evt = Event()
        self._result = None

    def set_result(self, value):
        self._result = value
        self._evt.set()

    def result(self):
        self._evt.wait()
        return self._result

class Worker(Actor):
    def submit(self, func, *args, **kwargs):
        r = Result()
        self.send((func, args, kwargs, r))
        return r

    def run(self):
        while True:
            func, args, kwargs, r = self.recv()
            r.set_result(func(*args, **kwargs))

# Example use
worker = Worker()
worker.start()
r = worker.submit(pow, 2, 3)
print(r.result())
If you relax the requirement concerning synchronous versus asynchronous message delivery (in the standard Actor model, messages are sent asynchronously), actor-like objects can also be defined more simply with generators. For example:
def print_actor():
    while True:
        try:
            msg = yield     # Get a message
            print('Got:', msg)
        except GeneratorExit:
            print('Actor terminating')

# Sample use
p = print_actor()
next(p)     # Advance to the yield (ready to receive)
p.send('Hello')
p.send('World')
p.close()
For background on the Actor model, see the article “10 分钟了解 Actor 模型” (a ten-minute introduction to the Actor model).
Implementing Publish/Subscribe Messaging
from contextlib import contextmanager
from collections import defaultdict

class Exchange:
    def __init__(self):
        self._subscribers = set()

    def attach(self, task):
        self._subscribers.add(task)

    def detach(self, task):
        self._subscribers.remove(task)

    @contextmanager
    def subscribe(self, *tasks):
        for task in tasks:
            self.attach(task)
        try:
            yield
        finally:
            for task in tasks:
                self.detach(task)

    def send(self, msg):
        for subscriber in self._subscribers:
            subscriber.send(msg)

# Dictionary of all created exchanges
_exchanges = defaultdict(Exchange)

# Return the Exchange instance associated with a given name
def get_exchange(name):
    return _exchanges[name]

# Example of using the subscribe() method
exc = get_exchange('name')
with exc.subscribe(task_a, task_b):
    ...
    exc.send('msg1')
    exc.send('msg2')
    ...

# task_a and task_b detached here
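A subscriber here can be anything with a send() method; a minimal sketch of one (instances of it could serve as task_a and task_b above):

class DisplayMessages:
    def __init__(self):
        self.count = 0

    def send(self, msg):
        self.count += 1
        print('msg[{}]: {!r}'.format(self.count, msg))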
Using Generators Instead of Threads
# Two simple generator functions
def countdown(n):
    while n > 0:
        print('T-minus', n)
        yield
        n -= 1
    print('Blastoff!')

def countup(n):
    x = 0
    while x < n:
        print('Counting up', x)
        yield
        x += 1

from collections import deque

class TaskScheduler:
    def __init__(self):
        self._task_queue = deque()

    def new_task(self, task):
        '''
        Admit a newly started task to the scheduler
        '''
        self._task_queue.append(task)

    def run(self):
        '''
        Run until there are no more tasks
        '''
        while self._task_queue:
            task = self._task_queue.popleft()
            try:
                # Run until the next yield statement
                next(task)
                self._task_queue.append(task)
            except StopIteration:
                # Generator is no longer executing
                pass

# Example use
sched = TaskScheduler()
sched.new_task(countdown(10))
sched.new_task(countdown(5))
sched.new_task(countup(15))
sched.run()
Polling Multiple Thread Queues
import queue
import socket
import os

class PollableQueue(queue.Queue):
    def __init__(self):
        super().__init__()
        # Create a pair of connected sockets
        if os.name == 'posix':
            self._putsocket, self._getsocket = socket.socketpair()
        else:
            # Compatibility on non-POSIX systems
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.bind(('127.0.0.1', 0))
            server.listen(1)
            self._putsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._putsocket.connect(server.getsockname())
            self._getsocket, _ = server.accept()
            server.close()

    def fileno(self):
        return self._getsocket.fileno()

    def put(self, item):
        super().put(item)
        self._putsocket.send(b'x')

    def get(self):
        self._getsocket.recv(1)
        return super().get()

import select
import threading

def consumer(queues):
    '''
    Consumer that reads data on multiple queues simultaneously
    '''
    while True:
        can_read, _, _ = select.select(queues, [], [])
        for r in can_read:
            item = r.get()
            print('Got:', item)

q1 = PollableQueue()
q2 = PollableQueue()
q3 = PollableQueue()
t = threading.Thread(target=consumer, args=([q1,q2,q3],))
t.daemon = True
t.start()

# Feed data to the queues
q1.put(1)
q2.put(10)
q3.put('hello')
q2.put(15)
...
Launching a Daemon Process on Unix
#!/usr/bin/env python3
# daemon.py
import os
import sys
import atexit
import signal

def daemonize(pidfile, *, stdin='/dev/null',
                          stdout='/dev/null',
                          stderr='/dev/null'):

    if os.path.exists(pidfile):
        raise RuntimeError('Already running')

    # First fork (detaches from parent)
    try:
        if os.fork() > 0:
            raise SystemExit(0)   # Parent exit
    except OSError as e:
        raise RuntimeError('fork #1 failed.')

    os.chdir('/')
    os.umask(0)
    os.setsid()

    # Second fork (relinquish session leadership)
    try:
        if os.fork() > 0:
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError('fork #2 failed.')

    # Flush I/O buffers
    sys.stdout.flush()
    sys.stderr.flush()

    # Replace file descriptors for stdin, stdout, and stderr
    with open(stdin, 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())

    # Write the PID file
    with open(pidfile, 'w') as f:
        print(os.getpid(), file=f)

    # Arrange to have the PID file removed on exit/signal
    atexit.register(lambda: os.remove(pidfile))

    # Signal handler for termination (required)
    def sigterm_handler(signo, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, sigterm_handler)

def main():
    import time
    sys.stdout.write('Daemon started with pid {}\n'.format(os.getpid()))
    while True:
        sys.stdout.write('Daemon Alive! {}\n'.format(time.ctime()))
        time.sleep(10)

if __name__ == '__main__':
    PIDFILE = '/tmp/daemon.pid'

    if len(sys.argv) != 2:
        print('Usage: {} [start|stop]'.format(sys.argv[0]), file=sys.stderr)
        raise SystemExit(1)

    if sys.argv[1] == 'start':
        try:
            daemonize(PIDFILE,
                      stdout='/tmp/daemon.log',
                      stderr='/tmp/daemon.log')
        except RuntimeError as e:
            print(e, file=sys.stderr)
            raise SystemExit(1)
        main()

    elif sys.argv[1] == 'stop':
        if os.path.exists(PIDFILE):
            with open(PIDFILE) as f:
                os.kill(int(f.read()), signal.SIGTERM)
        else:
            print('Not running', file=sys.stderr)
            raise SystemExit(1)

    else:
        print('Unknown command {!r}'.format(sys.argv[1]), file=sys.stderr)
        raise SystemExit(1)
Scripting and System Administration
Accepting Script Input via Redirection, Pipes, or Files
#!/usr/bin/env python3
import fileinput

with fileinput.input() as f_input:
    for line in f_input:
        print(line, end='')
You can then supply input to this script in all of the ways mentioned earlier. Assuming you save it as filein.py and make it executable, you can invoke it like this and get the expected output:
$ ls | ./filein.py # Prints a directory listing to stdout.
$ ./filein.py /etc/passwd # Reads /etc/passwd to stdout.
$ ./filein.py < /etc/passwd # Reads /etc/passwd to stdout.
Terminating a Program with an Error Message
raise SystemExit('It failed!')

# A slightly more elaborate alternative
import sys
sys.stderr.write('It failed!\n')
raise SystemExit(1)
Parsing Command-Line Options
# search.py
'''
Hypothetical command-line tool for searching a collection of
files for one or more text patterns.
'''
import argparse

parser = argparse.ArgumentParser(description='Search some files')
parser.add_argument(dest='filenames', metavar='filename', nargs='*')
parser.add_argument('-p', '--pat', metavar='pattern', required=True,
                    dest='patterns', action='append',
                    help='text pattern to search for')
parser.add_argument('-v', dest='verbose', action='store_true',
                    help='verbose mode')
parser.add_argument('-o', dest='outfile', action='store',
                    help='output file')
parser.add_argument('--speed', dest='speed', action='store',
                    choices={'slow','fast'}, default='slow',
                    help='search speed')
args = parser.parse_args()

# Output the collected arguments
print(args.filenames)
print(args.patterns)
print(args.verbose)
print(args.outfile)
print(args.speed)
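A sample invocation of the script above, with the values the argument definitions would collect:

$ python3 search.py -v -p spam --pat=eggs foo.txt bar.txt
['foo.txt', 'bar.txt']
['spam', 'eggs']
True
None
slow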
Prompting for a Password at Runtime
import getpass

user = getpass.getuser()
passwd = getpass.getpass()

if svc_login(user, passwd):   # You must write svc_login()
    print('Yay!')
else:
    print('Boo!')
Getting the Terminal Size
>>> import os
>>> sz = os.get_terminal_size()
>>> sz
os.terminal_size(columns=80, lines=24)
>>> sz.columns
80
>>> sz.lines
24
>>>
Executing an External Command and Getting Its Output
import subprocess

try:
    out_bytes = subprocess.check_output(['netstat','-a'], timeout=5, stderr=subprocess.STDOUT)
    out_text = out_bytes.decode('utf-8')
    out_bytes = subprocess.check_output('grep python | wc > out', shell=True)
except subprocess.CalledProcessError as e:
    out_bytes = e.output      # Output generated before error
    code = e.returncode       # Return code
Using check_output() is the simplest way to execute an external command and get its output. However, if you need more complex interaction with the subprocess, such as sending it input, take a different approach and use the subprocess.Popen class directly. For example:
import subprocess

# Some text to send
text = b'''
hello world
this is a test
goodbye
'''

# Launch a command with pipes
p = subprocess.Popen(['wc'],
                     stdout=subprocess.PIPE,
                     stdin=subprocess.PIPE,
                     stderr=subprocess.PIPE)   # stderr must also be piped to be captured below

# Send the data and get the output
stdout, stderr = p.communicate(text)

# To interpret as text, decode
out = stdout.decode('utf-8')
err = stderr.decode('utf-8')
Copying or Moving Files and Directories
import shutil

# Copy src to dst. (cp src dst)
shutil.copy(src, dst)

# Copy files, but preserve metadata (cp -p src dst)
shutil.copy2(src, dst)

# Copy directory tree (cp -R src dst)
shutil.copytree(src, dst)

# Move src to dst (mv src dst)
shutil.move(src, dst)

def ignore_pyc_files(dirname, filenames):
    return [name for name in filenames if name.endswith('.pyc')]

shutil.copytree(src, dst, ignore=ignore_pyc_files)
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('*~', '*.pyc'))
Creating and Unpacking Archives
>>> import shutil
>>> shutil.unpack_archive('Python-3.3.0.tgz')
>>> shutil.make_archive('py33','zip','Python-3.3.0')
'/Users/beazley/Downloads/py33.zip'
>>>
>>> shutil.get_archive_formats()
[('bztar', "bzip2'ed tar-file"), ('gztar', "gzip'ed tar-file"),
('tar', 'uncompressed tar file'), ('zip', 'ZIP file')]
>>>
Finding Files by Name
#!/usr/bin/env python3.3
import os
import sys

def findfile(start, name):
    for relpath, dirs, files in os.walk(start):
        if name in files:
            full_path = os.path.join(start, relpath, name)
            print(os.path.normpath(os.path.abspath(full_path)))

if __name__ == '__main__':
    findfile(sys.argv[1], sys.argv[2])
Reading Configuration Files
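The interactive session below assumes a config.ini roughly like the following (a sketch reconstructed from the values being read; the actual file may differ):

; config.ini
[installation]
library=%(prefix)s/lib
prefix=/usr/local

[debug]
log_errors=true

[server]
port: 8080
nworkers: 32
signature:
    =================================
    Brought to you by the Python Cookbook
    =================================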
>>> from configparser import ConfigParser
>>> cfg = ConfigParser()
>>> cfg.read('config.ini')
['config.ini']
>>> cfg.sections()
['installation', 'debug', 'server']
>>> cfg.get('installation','library')
'/usr/local/lib'
>>> cfg.getboolean('debug','log_errors')
True
>>> cfg.getint('server','port')
8080
>>> cfg.getint('server','nworkers')
32
>>> print(cfg.get('server','signature'))
=================================
Brought to you by the Python Cookbook
=================================
>>>
>>> cfg.set('server','port','9000')
>>> cfg.set('debug','log_errors','False')
>>> import sys
>>> cfg.write(sys.stdout)
Adding Logging to Simple Scripts
import logging

def main():
    # Configure the logging system
    logging.basicConfig(
        filename='app.log',
        level=logging.ERROR,
        format='%(levelname)s:%(asctime)s:%(message)s'
    )

    # Variables (to make the calls that follow work)
    hostname = 'www.python.org'
    item = 'spam'
    filename = 'data.csv'
    mode = 'r'

    # Example logging calls (insert into your program)
    logging.critical('Host %s unknown', hostname)
    logging.error("Couldn't find %r", item)
    logging.warning('Feature is deprecated')
    logging.info('Opening file %r, mode=%r', filename, mode)
    logging.debug('Got here')

if __name__ == '__main__':
    main()
The logging configuration above is hard-coded into the program. To use a configuration file instead, change the basicConfig() call like this:
import logging.config

def main():
    # Configure the logging system
    logging.config.fileConfig('logconfig.ini')
    ...
Then create a file named logconfig.ini that looks like this:
[loggers]
keys=root
[handlers]
keys=defaultHandler
[formatters]
keys=defaultFormatter
[logger_root]
level=INFO
handlers=defaultHandler
qualname=root
[handler_defaultHandler]
class=FileHandler
formatter=defaultFormatter
args=('app.log', 'a')
[formatter_defaultFormatter]
format=%(levelname)s:%(name)s:%(message)s
Adding Logging to Libraries
# somelib.py
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

# Example function (for testing)
def func():
    log.critical('A Critical Error!')
    log.debug('A debug message')
With this configuration, no logging output appears by default. For example:
>>> import somelib
>>> somelib.func()
>>>
However, once the logging system has been configured, log messages will start appearing. For example:
>>> import logging
>>> logging.basicConfig()
>>> somelib.func()
CRITICAL:somelib:A Critical Error!
>>>
Implementing a Stopwatch Timer
import time

class Timer:
    def __init__(self, func=time.perf_counter):
        self.elapsed = 0.0
        self._func = func
        self._start = None

    def start(self):
        if self._start is not None:
            raise RuntimeError('Already started')
        self._start = self._func()

    def stop(self):
        if self._start is None:
            raise RuntimeError('Not started')
        end = self._func()
        self.elapsed += end - self._start
        self._start = None

    def reset(self):
        self.elapsed = 0.0

    @property
    def running(self):
        return self._start is not None

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()

def countdown(n):
    while n > 0:
        n -= 1

with Timer() as t2:
    countdown(1000000)
print(t2.elapsed)
Both time.perf_counter() and time.process_time() return time as fractional seconds. The value of a single reading has no particular meaning on its own; to get a meaningful result, you have to call the function twice and compute the difference.
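Because the timing function is passed in as a constructor argument, the same Timer can measure process CPU time instead of wall-clock time; a small sketch reusing the countdown() defined above:

t = Timer(time.process_time)   # Measure CPU time rather than wall-clock time
t.start()
countdown(1000000)
t.stop()
print(t.elapsed)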
Limiting Memory and CPU Usage
import signal
import resource
import os

def time_exceeded(signo, frame):
    print("Time's up!")
    raise SystemExit(1)

def set_max_runtime(seconds):
    # Install the signal handler and set a resource limit.
    # SIGXCPU is generated when the CPU time limit expires; the handler
    # can then clean up and exit.
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, time_exceeded)

def limit_memory(maxsize):
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (maxsize, hard))

if __name__ == '__main__':
    set_max_runtime(15)
    # With a memory limit set like this, the program raises MemoryError
    # once it runs out of memory.
    limit_memory(1024)
    while True:
        pass
Launching a Web Browser
>>> import webbrowser
>>> webbrowser.open('http://www.python.org')
True
>>>
>>> # Open the page in a new browser window
>>> webbrowser.open_new('http://www.python.org')
True
>>>
>>> # Open the page in a new browser tab
>>> webbrowser.open_new_tab('http://www.python.org')
True
>>>
>>> c = webbrowser.get('firefox')
>>> c.open('http://www.python.org')
True
>>> c.open_new_tab('http://docs.python.org')
True
>>>
Testing, Debugging, and Exceptions
Testing Output Sent to stdout
# mymodule.py
def urlprint(protocol, host, domain):
    url = '{}://{}.{}'.format(protocol, host, domain)
    print(url)

from io import StringIO
from unittest import TestCase
from unittest.mock import patch
import mymodule

class TestURLPrint(TestCase):
    def test_url_gets_to_stdout(self):
        protocol = 'http'
        host = 'www'
        domain = 'example.com'
        expected_url = '{}://{}.{}\n'.format(protocol, host, domain)

        with patch('sys.stdout', new=StringIO()) as fake_out:
            mymodule.urlprint(protocol, host, domain)
            self.assertEqual(fake_out.getvalue(), expected_url)
Patching Objects in Unit Tests
from unittest.mock import patch
import example

@patch('example.func')
def test1(x, mock_func):
    example.func(x)       # Uses patched example.func
    mock_func.assert_called_with(x)
Testing for Exceptions in Unit Tests
import unittest

# A simple function to illustrate
def parse_int(s):
    return int(s)

class TestConversion(unittest.TestCase):
    def test_bad_int(self):
        with self.assertRaisesRegex(ValueError, 'invalid literal .*'):
            r = parse_int('N/A')
Logging Test Output to a File
import unittest

class MyTest(unittest.TestCase):
    pass

import sys

def main(out=sys.stderr, verbosity=2):
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromModule(sys.modules[__name__])
    unittest.TextTestRunner(out, verbosity=verbosity).run(suite)

if __name__ == '__main__':
    with open('testing.out', 'w') as f:
        main(f)
This shows some noteworthy internals of the unittest module. unittest first assembles a test suite containing the various test methods you defined; once the suite is assembled, the tests it contains can be executed. These two steps are separate. A unittest.TestLoader instance is used to assemble the suite, and loadTestsFromModule() is one of the methods it defines for collecting test cases: it scans a module for TestCase classes and extracts their test methods. For finer-grained control, the loadTestsFromTestCase() method extracts the test methods from a single class derived from TestCase. TextTestRunner is an example of a test runner class whose main purpose is to execute the tests in a suite; it is the same runner used by unittest.main(). Here, though, we give it some low-level configuration, including the output file and an elevated verbosity level.
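For instance, a minimal sketch of that finer-grained loading, reusing the MyTest class from above:

loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(MyTest)   # Collect tests from a single TestCase class
unittest.TextTestRunner(verbosity=2).run(suite)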
Skipping Tests or Anticipating Failures
The unittest module has decorators that can be applied to control the handling of particular test methods, for example:
import unittest
import os
import platform

class Tests(unittest.TestCase):
    def test_0(self):
        self.assertTrue(True)

    @unittest.skip('skipped test')
    def test_1(self):
        self.fail('should have failed!')

    @unittest.skipIf(os.name=='posix', 'Not supported on Unix')
    def test_2(self):
        import winreg

    @unittest.skipUnless(platform.system() == 'Darwin', 'Mac specific test')
    def test_3(self):
        self.assertTrue(True)

    @unittest.expectedFailure
    def test_4(self):
        self.assertEqual(2+2, 5)

if __name__ == '__main__':
    unittest.main()
Issuing Warning Messages
import warnings

def func(x, y, logfile=None, debug=False):
    if logfile is not None:
        warnings.warn('logfile argument deprecated', DeprecationWarning)
    ...
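Note that DeprecationWarning is often suppressed by default; to actually see warnings, or to treat them as fatal, the interpreter can be run with a -W option (script.py here is a placeholder):

$ python3 -W all script.py      # Report all warnings
$ python3 -W error script.py    # Turn warnings into exceptions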
Debugging Basic Program Crashes
If your program crashes with an exception, running python3 -i someprogram.py is a simple way to debug it: the -i option leaves you in an interactive shell after the program ends, where you can inspect the environment. For example, suppose you have the following code:
# sample.py
def func(n):
    return n + 10

func('Hello')
Running python3 -i sample.py produces output like this:
bash % python3 -i sample.py
Traceback (most recent call last):
File "sample.py", line 6, in <module>
func('Hello')
File "sample.py", line 4, in func
return n + 10
TypeError: Can't convert 'int' object to str implicitly
>>> func(10)
20
>>>
Profiling and Timing Your Program
bash % python3 -m cProfile someprogram.py
859647 function calls in 16.016 CPU seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
263169 0.080 0.000 0.080 0.000 someprogram.py:16(frange)
513 0.001 0.000 0.002 0.000 someprogram.py:30(generate_mandel)
262656 0.194 0.000 15.295 0.000 someprogram.py:32(<genexpr>)
1 0.036 0.036 16.077 16.077 someprogram.py:4(<module>)
262144 15.021 0.000 15.021 0.000 someprogram.py:4(in_mandelbrot)
1 0.000 0.000 0.000 0.000 os.py:746(urandom)
1 0.000 0.000 0.000 0.000 png.py:1056(_readable)
1 0.000 0.000 0.000 0.000 png.py:1073(Reader)
1 0.227 0.227 0.438 0.438 png.py:163(<module>)
512 0.010 0.000 0.010 0.000 png.py:200(group)
...
bash %
Or with a decorator:
import time
from functools import wraps

def timethis(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        r = func(*args, **kwargs)
        end = time.perf_counter()
        print('{}.{} : {}'.format(func.__module__, func.__name__, end - start))
        return r
    return wrapper

>>> @timethis
... def countdown(n):
...     while n > 0:
...         n -= 1
...
Or with a context manager:
import time
from contextlib import contextmanager

@contextmanager
def timeblock(label):
    start = time.perf_counter()
    try:
        yield
    finally:
        end = time.perf_counter()
        print('{} : {}'.format(label, end - start))

>>> with timeblock('counting'):
...     n = 10000000
...     while n > 0:
...         n -= 1
...
For timing the performance of very small code fragments, the timeit module is convenient. For example:
>> from timeit import timeit
>>> timeit('sqrt(2)', 'from math import sqrt', number=10000000)
1.0270336690009572
Making Your Programs Run Faster
Because of how local and global variables are implemented (local variable access is faster), statements placed inside a function run faster than the same statements at script level.
You can often use imports of the form from module import name to eliminate attribute lookups; for frequently accessed names, binding them to local variables speeds things up further (see the sketch below).
Any extra processing layer you wrap around your code (decorators, properties, descriptors) makes it run slower, so avoid unnecessary abstraction.
Use the built-in containers.
Avoid creating unnecessary data structures or copies.
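A minimal sketch of the name-binding tips above (the function and data are illustrative):

from math import sqrt    # Avoids repeated math.sqrt attribute lookups

def compute_roots(nums):
    result = []
    result_append = result.append    # Bind the bound method to a local name once
    for n in nums:
        result_append(sqrt(n))
    return result

print(compute_roots([4, 9, 16]))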
Summary
I have finally finished working through Python Cookbook, 3rd Edition. This pass was fairly thorough: apart from the material on C extensions, I read everything, and I have excerpted the code that is easiest to forget into this blog as a reference. I also came across someone else's reading notes on Fluent Python online, which look quite good; I will go through those when I find the time.
Author: 徐新杰
Last updated: 2017-10-23
License: © Copyright 2020 Jeremy Xu