A big wave of updates

@@ -1,5 +1,6 @@
__pycache__
*.txt
album

._*
main.py (52 changes)

@@ -1,9 +1,10 @@
#!/usr/bin/env python3

import gi, sys, os
import gi, sys, os, threading
# import dbus
# import dbus.service, dbus.mainloop.glib
from pprint import pprint as print
import musicbrainzngs as mus

gi.require_version('Gtk', '3.0')

@@ -17,23 +18,58 @@ from mpd.base import MPDClient
app_id = 'fun.wkit.sonist'


def run_async(func):
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        return thread
    return wrapper
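run_async pushes the decorated call onto a daemon thread; any callback that ends up touching GTK widgets should therefore hop back onto the main loop. A sketch of that pattern with GLib.idle_add (fetch_cover and the callback are illustrative, not part of this commit):

    from gi.repository import GLib

    @run_async
    def fetch_cover(song, filepath, callback):
        # ... blocking MusicBrainz / file I/O happens on the worker thread ...
        # hand the result back to the GTK main loop before any widget is touched
        GLib.idle_add(callback, filepath)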


class Application(Gtk.Application):
    def __init__(self):
        Gtk.Application.__init__(self, application_id = app_id)

        self.add_window(SonistWindow())
        self.mpd = MPDClient()
        self.mpd.timeout = 10
        self.mpd.connect("localhost", 6600)

        self.mpc = MPDClient()
        self.mpc.timeout = 10
        self.mpc.connect("localhost", 6600)
        self.mpd.ping()

        self.connect('window-removed', self.on_window_removed)

        mus.set_useragent('Sonist Gtk', '0.0.1', 'https://github.com/app-cat/sonist-gtk')

    @run_async
    def get_cover(self, song, filepath, callback):
        try:
            data = mus.search_releases(song["artist"], song["title"], 1)
            release_id = data["release-list"][0]["release-group"]["id"]
            print(release_id)
            buff = mus.get_release_group_image_front(release_id, size = 128)
            with open(filepath, 'wb') as file:
                output = file.write(buff)
            callback(filepath)
        except:
            pass


    def do_activate(self):
        print('hello mpc')
        self.set_app_menu(None)
        self.set_menubar(None)
        # self.window.show_all()

        self.window = SonistWindow(self)
        self.add_window(self.window)

        self.window.show_all()


    def on_window_removed(self, app, win):
        if len(self.get_windows()) == 0:
            print('朕要休息了~~~')

@@ -62,8 +98,8 @@ if __name__ == "__main__":


    app = Application()
    app.run(sys.argv)

    # ApplicationService(app)
    app.run(sys.argv)


    Gtk.main()
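Two MPDClient connections are opened in __init__ above (self.mpd and self.mpc). A common reason for keeping a second connection is to park it in MPD's idle command so that server-side changes can be watched while the first connection keeps serving ordinary commands. A hedged sketch of that pattern, reusing run_async and GLib.idle_add from the sketch above (watch_changes and the self.window.sync_state call are illustrative, not part of this commit):

    @run_async
    def watch_changes(self):
        while True:
            changed = self.mpc.idle()              # blocks until MPD reports a change
            GLib.idle_add(self.window.sync_state)  # refresh the UI from the main loop
            print(changed)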

@@ -27,12 +27,3 @@ from mpd.base import MPDError
from mpd.base import PendingCommandError
from mpd.base import ProtocolError
from mpd.base import VERSION


try:
    from mpd.twisted import MPDProtocol
except ImportError:

    class MPDProtocol:
        def __init__():
            raise "No twisted module found"

mpd/asyncio.py (593 changes, file removed)

@@ -1,593 +0,0 @@
"""Asynchronous access to MPD using the asyncio methods of Python 3.

Interaction happens over the mpd.asyncio.MPDClient class, whose connect and
command methods are coroutines.

Some commands (eg. listall) additionally support the asynchronous iteration
(aiter, `async for`) interface; using it allows the library user to obtain
items of result as soon as they arrive.

The .idle() method works differently here: It is an asynchronous iterator that
produces a list of changed subsystems whenever a new one is available. The
MPDClient object automatically switches in and out of idle mode depending on
which subsystems there is currently interest in.

Command lists are currently not supported.


This module requires Python 3.5.2 or later to run.
"""
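The docstring describes the coroutine API of the module being removed here. A minimal usage sketch under those assumptions (host, port and the watched subsystems are illustrative):

    import asyncio
    from mpd.asyncio import MPDClient

    async def main():
        client = MPDClient()
        await client.connect("localhost", 6600)        # connect() is a coroutine
        print(await client.status())                   # ordinary commands are awaited
        async for changed in client.idle(["player"]):  # idle() is an async iterator
            print("changed subsystems:", changed)

    asyncio.run(main())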
||||
|
||||
import warnings
|
||||
import asyncio
|
||||
from functools import partial
|
||||
from typing import Optional, List, Tuple, Iterable, Callable, Union
|
||||
|
||||
from mpd.base import HELLO_PREFIX, ERROR_PREFIX, SUCCESS
|
||||
from mpd.base import MPDClientBase
|
||||
from mpd.base import MPDClient as SyncMPDClient
|
||||
from mpd.base import ProtocolError, ConnectionError, CommandError, CommandListError
|
||||
from mpd.base import mpd_command_provider
|
||||
|
||||
|
||||
class BaseCommandResult(asyncio.Future):
|
||||
"""A future that carries its command/args/callback with it for the
|
||||
convenience of passing it around to the command queue."""
|
||||
|
||||
def __init__(self, command, args, callback):
|
||||
super().__init__()
|
||||
self._command = command
|
||||
self._args = args
|
||||
self._callback = callback
|
||||
|
||||
async def _feed_from(self, mpdclient):
|
||||
while True:
|
||||
line = await mpdclient._read_line()
|
||||
self._feed_line(line)
|
||||
if line is None:
|
||||
return
|
||||
|
||||
|
||||
class CommandResult(BaseCommandResult):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.__spooled_lines = []
|
||||
|
||||
def _feed_line(self, line): # FIXME just inline?
|
||||
"""Put the given line into the callback machinery, and set the result on a None line."""
|
||||
if line is None:
|
||||
if self.cancelled():
|
||||
# Data was still pulled out of the connection, but the original
|
||||
# requester has cancelled the request -- no need to filter the
|
||||
# data through the preprocessing callback
|
||||
pass
|
||||
else:
|
||||
self.set_result(self._callback(self.__spooled_lines))
|
||||
else:
|
||||
self.__spooled_lines.append(line)
|
||||
|
||||
def _feed_error(self, error):
|
||||
if not self.done():
|
||||
self.set_exception(error)
|
||||
else:
|
||||
# These do occur (especially during the test suite run) when a
|
||||
# disconnect was already initialized, but the run task being
|
||||
# cancelled has not ever yielded at all and thus still needs to run
|
||||
# through to its first await point (which is then in a situation
|
||||
# where properties it'd like to access are already cleaned up,
|
||||
# resulting in an AttributeError)
|
||||
#
|
||||
# Rather than quenching them here, they are made visible (so that
|
||||
# other kinds of double errors raise visibly, even though none are
|
||||
# known right now); instead, the run loop yields initially with a
|
||||
# sleep(0) that ensures it can be cancelled properly at any time.
|
||||
raise error
|
||||
|
||||
class BinaryCommandResult(asyncio.Future):
|
||||
# Unlike the regular commands that defer to any callback that may be
|
||||
# defined for them, this uses the predefined _read_binary mechanism of the
|
||||
# mpdclient
|
||||
async def _feed_from(self, mpdclient):
|
||||
# Data must be pulled out no matter whether will later be ignored or not
|
||||
binary = await mpdclient._read_binary()
|
||||
if self.cancelled():
|
||||
pass
|
||||
else:
|
||||
self.set_result(binary)
|
||||
|
||||
_feed_error = CommandResult._feed_error
|
||||
|
||||
class CommandResultIterable(BaseCommandResult):
|
||||
"""Variant of CommandResult where the underlying callback is an
|
||||
asynchronous generator, and can thus interpret lines as they come along.
|
||||
|
||||
The result can be used with the aiter interface (`async for`). If it is
|
||||
still used as a future instead, it eventually results in a list.
|
||||
|
||||
Commands used with this CommandResult must use their passed lines not like
|
||||
an iterable (as in the synchronous implementation), but as a asyncio.Queue.
|
||||
Furthermore, they must check whether the queue elements are exceptions, and
|
||||
raise them.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.__spooled_lines = asyncio.Queue()
|
||||
|
||||
def _feed_line(self, line):
|
||||
self.__spooled_lines.put_nowait(line)
|
||||
|
||||
_feed_error = _feed_line
|
||||
|
||||
def __await__(self):
|
||||
asyncio.Task(self.__feed_future())
|
||||
return super().__await__()
|
||||
|
||||
__iter__ = __await__ # for 'yield from' style invocation
|
||||
|
||||
async def __feed_future(self):
|
||||
result = []
|
||||
try:
|
||||
async for r in self:
|
||||
result.append(r)
|
||||
except Exception as e:
|
||||
self.set_exception(e)
|
||||
else:
|
||||
if not self.cancelled():
|
||||
self.set_result(result)
|
||||
|
||||
def __aiter__(self):
|
||||
if self.done():
|
||||
raise RuntimeError("Command result is already being consumed")
|
||||
return self._callback(self.__spooled_lines).__aiter__()
|
||||
|
||||
|
||||
@mpd_command_provider
|
||||
class MPDClient(MPDClientBase):
|
||||
__run_task = None # doubles as indicator for being connected
|
||||
|
||||
#: Indicator of whether there is a pending idle command that was not terminated yet.
|
||||
# When in doubt, this is True, thus erring on the side of caution (because
|
||||
# a "noidle" being sent while racing against an incoming idle notification
|
||||
# does no harm)
|
||||
__in_idle = False
|
||||
|
||||
#: Indicator that the last attempted idle failed.
|
||||
#
|
||||
# When set, IMMEDIATE_COMMAND_TIMEOUT is ignored in favor of waiting until
|
||||
# *something* else happens, and only then retried.
|
||||
#
|
||||
# Note that the only known condition in which this happens is when between
|
||||
# start of the connection and the presentation of credentials, more than
|
||||
# IMMEDIATE_COMMAND_TIMEOUT passes.
|
||||
__idle_failed = False
|
||||
|
||||
#: Seconds after a command's completion to send idle. Setting this too high
|
||||
# causes "blind spots" in the client's view of the server, setting it too
|
||||
# low sends needless idle/noidle after commands in quick succession.
|
||||
IMMEDIATE_COMMAND_TIMEOUT = 0.1
|
||||
|
||||
#: FIFO list of processors that may consume the read stream one after the
|
||||
# other
|
||||
#
|
||||
# As we don't have any other form of backpressure in the sending side
|
||||
# (which is not expected to be limited), its limit of COMMAND_QUEUE_LENGTH
|
||||
# serves as a limit against commands queuing up indefinitely. (It's not
|
||||
# *directly* throttling output, but as the convention is to put the
|
||||
# processor on the queue and then send the command, and commands are of
|
||||
# limited size, this is practically creating backpressure.)
|
||||
__command_queue = None
|
||||
|
||||
#: Construction size of __command_queue. The default limit is high enough
|
||||
# that a client can easily send off all existing commands simultaneously
|
||||
# without needlessly blocking the TCP flow, but small enough that
|
||||
# freespinning tasks create warnings.
|
||||
COMMAND_QUEUE_LENGTH = 128
|
||||
|
||||
#: Callbacks registered by any current callers of `idle()`.
|
||||
#
|
||||
# The first argument lists the changes that the caller is interested in
|
||||
# (and all current listeners' union is used to populate the `idle`
|
||||
# command's arguments), the latter is an actual callback that will be
|
||||
# passed either a set of changes or an exception.
|
||||
__idle_consumers: Optional[List[Tuple[
|
||||
Iterable[str],
|
||||
Callable[[Union[List[str], Exception]], None]
|
||||
]]] = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.__rfile = self.__wfile = None
|
||||
|
||||
async def connect(self, host, port=6600, loop=None):
|
||||
if loop is not None:
|
||||
warnings.warn("loop passed into MPDClient.connect is ignored, this will become an error", DeprecationWarning)
|
||||
if host.startswith("@"):
|
||||
host = "\0" + host[1:]
|
||||
if host.startswith("\0") or "/" in host:
|
||||
r, w = await asyncio.open_unix_connection(host)
|
||||
else:
|
||||
r, w = await asyncio.open_connection(host, port)
|
||||
self.__rfile, self.__wfile = r, w
|
||||
|
||||
self.__command_queue = asyncio.Queue(maxsize=self.COMMAND_QUEUE_LENGTH)
|
||||
self.__idle_consumers = [] #: list of (subsystem-list, callbacks) tuples
|
||||
|
||||
try:
|
||||
helloline = await asyncio.wait_for(self.__readline(), timeout=5)
|
||||
except asyncio.TimeoutError:
|
||||
self.disconnect()
|
||||
raise ConnectionError("No response from server while reading MPD hello")
|
||||
# FIXME should be reusable w/o reaching in
|
||||
SyncMPDClient._hello(self, helloline)
|
||||
|
||||
self.__run_task = asyncio.Task(self.__run())
|
||||
|
||||
@property
|
||||
def connected(self):
|
||||
return self.__run_task is not None
|
||||
|
||||
def disconnect(self):
|
||||
if (
|
||||
self.__run_task is not None
|
||||
): # is None eg. when connection fails in .connect()
|
||||
self.__run_task.cancel()
|
||||
if self.__wfile is not None:
|
||||
self.__wfile.close()
|
||||
self.__rfile = self.__wfile = None
|
||||
self.__run_task = None
|
||||
self.__command_queue = None
|
||||
if self.__idle_consumers is not None:
|
||||
# copying the list as each raising callback will remove itself from __idle_consumers
|
||||
for subsystems, callback in list(self.__idle_consumers):
|
||||
callback(ConnectionError())
|
||||
self.__idle_consumers = None
|
||||
|
||||
def _get_idle_interests(self):
|
||||
"""Accumulate a set of interests from the current __idle_consumers.
|
||||
Returns the union of their subscribed subjects, [] if at least one of
|
||||
them is the empty catch-all set, or None if there are no interests at
|
||||
all."""
|
||||
|
||||
if not self.__idle_consumers:
|
||||
return None
|
||||
if any(len(s) == 0 for (s, c) in self.__idle_consumers):
|
||||
return []
|
||||
return set.union(*(set(s) for (s, c) in self.__idle_consumers))
|
||||
|
||||
def _end_idle(self):
|
||||
"""If the main task is currently idling, make it leave idle and process
|
||||
the next command (if one is present) or just restart idle"""
|
||||
|
||||
if self.__in_idle:
|
||||
self.__write("noidle\n")
|
||||
self.__in_idle = False
|
||||
|
||||
async def __run(self):
|
||||
# See CommandResult._feed_error documentation
|
||||
await asyncio.sleep(0)
|
||||
result = None
|
||||
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
result = await asyncio.wait_for(
|
||||
self.__command_queue.get(),
|
||||
timeout=self.IMMEDIATE_COMMAND_TIMEOUT,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
# The cancellation of the __command_queue.get() that happens
|
||||
# in this case is intended, and is just what asyncio.Queue
|
||||
# suggests for "get with timeout".
|
||||
|
||||
if not self.__command_queue.empty():
|
||||
# A __command_queue.put() has happened after the
|
||||
# asyncio.wait_for() timeout but before execution of
|
||||
# this coroutine resumed. Looping around again will
|
||||
# fetch the new entry from the queue.
|
||||
continue
|
||||
|
||||
if self.__idle_failed:
|
||||
# We could try for a more elaborate path where we now
|
||||
# await the command queue indefinitely, but as we're
|
||||
# already in an error case, this whole situation only
|
||||
# persists until the error is processed somewhere else,
|
||||
# so ticking once per idle timeout is OK to keep things
|
||||
# simple.
|
||||
continue
|
||||
|
||||
subsystems = self._get_idle_interests()
|
||||
if subsystems is None:
|
||||
# The presumably most quiet subsystem -- in this case,
|
||||
# idle is only used to keep the connection alive.
|
||||
subsystems = ["database"]
|
||||
|
||||
# Careful: There can't be any await points between the
|
||||
# except and here, or the sequence between the idle and the
|
||||
# command processor might be wrong.
|
||||
result = CommandResult("idle", subsystems, self._parse_list)
|
||||
result.add_done_callback(self.__idle_result)
|
||||
self.__in_idle = True
|
||||
self._write_command(result._command, result._args)
|
||||
|
||||
# A new command was issued, so there's a chance that whatever
|
||||
# made idle fail is now fixed.
|
||||
self.__idle_failed = False
|
||||
|
||||
try:
|
||||
await result._feed_from(self)
|
||||
except CommandError as e:
|
||||
result._feed_error(e)
|
||||
# This kind of error we can tolerate without breaking up
|
||||
# the connection; any other would fly out, be reported
|
||||
# through the result and terminate the connection
|
||||
|
||||
except Exception as e:
|
||||
# Prevent the destruction of the pending task in the shutdown
|
||||
# function -- it's just shutting down by itself.
|
||||
self.__run_task = None
|
||||
self.disconnect()
|
||||
|
||||
if result is not None:
|
||||
# The last command has failed: Forward that result.
|
||||
#
|
||||
# (In idle, that's fine too -- everyone watching sees a
|
||||
# nonspecific event).
|
||||
result._feed_error(e)
|
||||
return
|
||||
else:
|
||||
raise
|
||||
# Typically this is a bug in mpd.asyncio.
|
||||
|
||||
def __idle_result(self, result):
|
||||
try:
|
||||
idle_changes = result.result()
|
||||
except CommandError as e:
|
||||
# Don't retry until something changed
|
||||
self.__idle_failed = True
|
||||
|
||||
# Not raising this any further: The callbacks are notified that
|
||||
# "something is up" (which is all their API gives), and whichever
|
||||
# command is issued to act on it will hopefully run into the same
|
||||
# condition.
|
||||
#
|
||||
# This does swallow the exact error cause.
|
||||
|
||||
idle_changes = set()
|
||||
for subsystems, _ in self.__idle_consumers:
|
||||
idle_changes = idle_changes.union(subsystems)
|
||||
|
||||
# make generator accessible multiple times
|
||||
idle_changes = list(idle_changes)
|
||||
|
||||
for subsystems, callback in self.__idle_consumers:
|
||||
if not subsystems or any(s in subsystems for s in idle_changes):
|
||||
callback(idle_changes)
|
||||
|
||||
# helper methods
|
||||
|
||||
async def __readline(self):
|
||||
"""Wrapper around .__rfile.readline that handles encoding"""
|
||||
data = await self.__rfile.readline()
|
||||
try:
|
||||
return data.decode("utf8")
|
||||
except UnicodeDecodeError:
|
||||
self.disconnect()
|
||||
raise ProtocolError("Invalid UTF8 received")
|
||||
|
||||
async def _read_chunk(self, length):
|
||||
try:
|
||||
return await self.__rfile.readexactly(length)
|
||||
except asyncio.IncompleteReadError:
|
||||
raise ConnectionError("Connection lost while reading binary")
|
||||
|
||||
def __write(self, text):
|
||||
"""Wrapper around .__wfile.write that handles encoding."""
|
||||
self.__wfile.write(text.encode("utf8"))
|
||||
|
||||
# copied and subtly modified stuff from base
|
||||
|
||||
# This is just a wrapper for the below.
|
||||
def _write_line(self, text):
|
||||
self.__write(text + "\n")
|
||||
|
||||
# FIXME This code should be shareable.
|
||||
_write_command = SyncMPDClient._write_command
|
||||
|
||||
async def _read_line(self):
|
||||
line = await self.__readline()
|
||||
if not line.endswith("\n"):
|
||||
raise ConnectionError("Connection lost while reading line")
|
||||
line = line.rstrip("\n")
|
||||
if line.startswith(ERROR_PREFIX):
|
||||
error = line[len(ERROR_PREFIX) :].strip()
|
||||
raise CommandError(error)
|
||||
if line == SUCCESS:
|
||||
return None
|
||||
return line
|
||||
|
||||
async def _parse_objects_direct(self, lines, delimiters=[], lookup_delimiter=False):
|
||||
obj = {}
|
||||
while True:
|
||||
line = await lines.get()
|
||||
if isinstance(line, BaseException):
|
||||
raise line
|
||||
if line is None:
|
||||
break
|
||||
key, value = self._parse_pair(line, separator=": ")
|
||||
key = key.lower()
|
||||
if lookup_delimiter and not delimiters:
|
||||
delimiters = [key]
|
||||
if obj:
|
||||
if key in delimiters:
|
||||
yield obj
|
||||
obj = {}
|
||||
elif key in obj:
|
||||
if not isinstance(obj[key], list):
|
||||
obj[key] = [obj[key], value]
|
||||
else:
|
||||
obj[key].append(value)
|
||||
continue
|
||||
obj[key] = value
|
||||
if obj:
|
||||
yield obj
|
||||
|
||||
async def _execute_binary(self, command, args):
|
||||
# Fun fact: By fetching data in lockstep, this is a bit less efficient
|
||||
# than it could be (which would be "after having received the first
|
||||
# chunk, guess that the other chunks are of equal size and request at
|
||||
# several multiples concurrently, ensuring the TCP connection can stay
|
||||
# full), but at the other hand it leaves the command queue empty so
|
||||
# that more time critical commands can be executed right away
|
||||
|
||||
data = None
|
||||
args = list(args)
|
||||
assert len(args) == 1
|
||||
args.append(0)
|
||||
final_metadata = None
|
||||
while True:
|
||||
partial_result = BinaryCommandResult()
|
||||
await self.__command_queue.put(partial_result)
|
||||
self._end_idle()
|
||||
self._write_command(command, args)
|
||||
metadata = await partial_result
|
||||
chunk = metadata.pop('binary', None)
|
||||
|
||||
if final_metadata is None:
|
||||
data = chunk
|
||||
final_metadata = metadata
|
||||
if not data:
|
||||
break
|
||||
try:
|
||||
size = int(final_metadata['size'])
|
||||
except KeyError:
|
||||
size = len(chunk)
|
||||
except ValueError:
|
||||
raise CommandError("Size data unsuitable for binary transfer")
|
||||
else:
|
||||
if metadata != final_metadata:
|
||||
raise CommandError("Metadata of binary data changed during transfer")
|
||||
if chunk is None:
|
||||
raise CommandError("Binary field vanished changed during transfer")
|
||||
data += chunk
|
||||
args[-1] = len(data)
|
||||
if len(data) > size:
|
||||
raise CommandListError("Binary data announced size exceeded")
|
||||
elif len(data) == size:
|
||||
break
|
||||
|
||||
if data is not None:
|
||||
final_metadata['binary'] = data
|
||||
|
||||
final_metadata.pop('size', None)
|
||||
|
||||
return final_metadata
|
||||
|
||||
# omits _read_chunk checking because the async version already
|
||||
# raises; otherwise it's just awaits sprinkled in
|
||||
async def _read_binary(self):
|
||||
obj = {}
|
||||
|
||||
while True:
|
||||
line = await self._read_line()
|
||||
if line is None:
|
||||
break
|
||||
|
||||
key, value = self._parse_pair(line, ": ")
|
||||
|
||||
if key == "binary":
|
||||
chunk_size = int(value)
|
||||
value = await self._read_chunk(chunk_size)
|
||||
|
||||
if await self.__rfile.readexactly(1) != b"\n":
|
||||
# newline after binary content
|
||||
self.disconnect()
|
||||
raise ConnectionError("Connection lost while reading line")
|
||||
|
||||
obj[key] = value
|
||||
return obj
|
||||
|
||||
# command provider interface
|
||||
@classmethod
|
||||
def add_command(cls, name, callback):
|
||||
if callback.mpd_commands_binary:
|
||||
async def f(self, *args):
|
||||
result = await self._execute_binary(name, args)
|
||||
|
||||
# With binary, the callback is applied to the final result
|
||||
# rather than to the iterator over the lines (cf.
|
||||
# MPDClient._execute_binary)
|
||||
return callback(self, result)
|
||||
else:
|
||||
command_class = (
|
||||
CommandResultIterable if callback.mpd_commands_direct else CommandResult
|
||||
)
|
||||
if hasattr(cls, name):
|
||||
# Idle and noidle are explicitly implemented, skipping them.
|
||||
return
|
||||
|
||||
def f(self, *args):
|
||||
result = command_class(name, args, partial(callback, self))
|
||||
if self.__run_task is None:
|
||||
raise ConnectionError("Can not send command to disconnected client")
|
||||
|
||||
try:
|
||||
self.__command_queue.put_nowait(result)
|
||||
except asyncio.QueueFull as e:
|
||||
e.args = ("Command queue overflowing; this indicates the"
|
||||
" application sending commands in an uncontrolled"
|
||||
" fashion without awaiting them, and typically"
|
||||
" indicates a memory leak.",)
|
||||
# While we *could* indicate to the queued result that it has
|
||||
# yet to send its request, that'd practically create a queue of
|
||||
# awaited items in the user application that's growing
|
||||
# unlimitedly, eliminating any chance of timely responses.
|
||||
# Furthermore, the author sees no practical use case that's not
|
||||
# violating MPD's guidance of "Do not manage a client-side copy
|
||||
# of MPD's database". If a use case *does* come up, any change
|
||||
# would need to maintain the property of providing backpressure
|
||||
# information. That would require an API change.
|
||||
raise
|
||||
|
||||
self._end_idle()
|
||||
# Careful: There can't be any await points between the queue
|
||||
# appending and the write
|
||||
try:
|
||||
self._write_command(result._command, result._args)
|
||||
except BaseException as e:
|
||||
self.disconnect()
|
||||
result.set_exception(e)
|
||||
return result
|
||||
|
||||
escaped_name = name.replace(" ", "_")
|
||||
f.__name__ = escaped_name
|
||||
setattr(cls, escaped_name, f)
|
||||
|
||||
# commands that just work differently
|
||||
async def idle(self, subsystems=()):
|
||||
if self.__idle_consumers is None:
|
||||
raise ConnectionError("Can not start idle on a disconnected client")
|
||||
|
||||
interests_before = self._get_idle_interests()
|
||||
# A queue accepting either a list of things that changed in a single
|
||||
# idle cycle, or an exception to be raised
|
||||
changes = asyncio.Queue()
|
||||
try:
|
||||
entry = (subsystems, changes.put_nowait)
|
||||
self.__idle_consumers.append(entry)
|
||||
if self._get_idle_interests != interests_before:
|
||||
# Technically this does not enter idle *immediately* but rather
|
||||
# only after any commands after IMMEDIATE_COMMAND_TIMEOUT;
|
||||
# practically that should be a good thing.
|
||||
self._end_idle()
|
||||
while True:
|
||||
item = await changes.get()
|
||||
if isinstance(item, Exception):
|
||||
raise item
|
||||
yield item
|
||||
finally:
|
||||
if self.__idle_consumers is not None:
|
||||
self.__idle_consumers.remove(entry)
|
||||
|
||||
def noidle(self):
|
||||
raise AttributeError("noidle is not supported / required in mpd.asyncio")
|

@@ -0,0 +1,78 @@
#!/usr/bin/env python3

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject

from .image_button import ImageButton

class CtrlBox(Gtk.Box):

    __gsignals__ = {
        'clicked': (GObject.SIGNAL_RUN_FIRST, None, (str,))
    }

    def __init__(self, spacing = 6):
        Gtk.Box.__init__(self, spacing = spacing)

        self.modes = ['./usr/share/sonist/all.png','./usr/share/sonist/rand.png','./usr/share/sonist/single.png']
        self.curr_mode = 0

        self.mode_btn = ImageButton('./usr/share/sonist/all.png')
        self.prev_btn = ImageButton('./usr/share/sonist/prev.png')
        self.play_btn = ImageButton('./usr/share/sonist/pause.png', 48, 48)
        self.next_btn = ImageButton('./usr/share/sonist/next.png')
        self.vol_btn = ImageButton('./usr/share/sonist/volume.png')

        self.pack_start(self.mode_btn, False, False, 0)
        self.pack_start(self.prev_btn, False, False, 0)
        self.pack_start(self.play_btn, False, False, 0)
        self.pack_start(self.next_btn, False, False, 0)
        self.pack_start(self.vol_btn, False, False, 0)


        self.mode_btn.connect('clicked', self.on_btn_clicked)
        self.prev_btn.connect('clicked', self.on_btn_clicked)
        self.play_btn.connect('clicked', self.on_btn_clicked)
        self.next_btn.connect('clicked', self.on_btn_clicked)
        self.vol_btn.connect('clicked', self.on_btn_clicked)


    def on_btn_clicked(self, btn):

        if btn == self.play_btn:
            self.emit('clicked', 'play_btn')

        elif btn == self.mode_btn:
            self.curr_mode += 1
            if self.curr_mode > 2:
                self.curr_mode = 0
            self.mode_btn.set_image(self.modes[self.curr_mode])
            self.emit('clicked', 'mode_btn')

        elif btn == self.prev_btn:
            self.emit('clicked', 'prev_btn')

        elif btn == self.next_btn:
            self.emit('clicked', 'next_btn')

        elif btn == self.vol_btn:
            self.emit('clicked','vol_btn')


    def toggle_play_btn(self, on = True):
        if on:
            self.play_btn.set_image('./usr/share/sonist/play_a.png')
        else:
            self.play_btn.set_image('./usr/share/sonist/pause.png')


    def toggle_mode_btn(self, mode = 'single'):
        if mode == 'single':
            self.curr_mode = 2
        elif mode == 'random':
            self.curr_mode = 1
        else:
            self.curr_mode = 0

        self.mode_btn.set_image(self.modes[self.curr_mode])
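CtrlBox funnels all five buttons into a single string-typed 'clicked' signal declared in __gsignals__, so a consumer only needs one handler. A minimal sketch (the on_ctrl name is illustrative; window.py below connects the same way):

    ctrl = CtrlBox()

    def on_ctrl(box, name):
        # name is one of 'play_btn', 'mode_btn', 'prev_btn', 'next_btn', 'vol_btn'
        print('pressed:', name)

    ctrl.connect('clicked', on_ctrl)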
ui/image.py (48 changes)

@@ -8,15 +8,24 @@ from gi.repository import Gtk, Gdk, GdkPixbuf
class ScaleImage(Gtk.Image):
    def __init__(self, filepath):
        Gtk.Image.__init__(self)
        self.width = None
        self.height = None
        self.reset(filepath)


    def reset(self, filepath):
        self.origin = GdkPixbuf.Pixbuf.new_from_file(filepath)
        self.pixbuf = self.origin

        self.width = self.origin.get_width()
        self.height = self.origin.get_height()
        if self.width is None:
            self.pixbuf = self.origin

            self.set_from_pixbuf(self.origin)
            self.width = self.origin.get_width()
            self.height = self.origin.get_height()
        else:
            self.pixbuf = self.origin.scale_simple(self.width, self.height, GdkPixbuf.InterpType.BILINEAR)

        self.set_from_pixbuf(self.pixbuf)
        return self


    def resize(self, width, height):

@@ -24,6 +33,7 @@ class ScaleImage(Gtk.Image):
        self.height = height
        self.pixbuf = self.origin.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
        self.set_from_pixbuf(self.pixbuf)
        return self


    def set_radius(self, radius = 0):

@@ -34,19 +44,19 @@ class ScaleImage(Gtk.Image):
        Gdk.cairo_set_source_pixbuf(ctx, self.pixbuf, 0, 0)

        # top-left rounded corner
        ctx.arc(radius, radius, radius, -math.pi, -math.pi / 2.)
        ctx.arc(radius, radius, radius, -math.pi, -math.pi / 2)
        ctx.line_to(w - radius, 0)

        # top-right rounded corner
        ctx.arc(w - radius, radius, radius, -math.pi / 2., 0)
        ctx.arc(w - radius, radius, radius, -math.pi / 2, 0)
        ctx.line_to(w, -radius)

        # bottom-right rounded corner
        ctx.arc(w - radius, h - radius, radius, 0, math.pi / 2.)
        ctx.arc(w - radius, h - radius, radius, 0, math.pi / 2)
        ctx.line_to(radius, h)

        # bottom-left rounded corner
        ctx.arc(radius, h - radius, radius, math.pi / 2., math.pi)
        ctx.arc(radius, h - radius, radius, math.pi / 2, math.pi)
        ctx.close_path()

        ctx.clip()

@@ -54,5 +64,27 @@ class ScaleImage(Gtk.Image):

        pixbuf = Gdk.pixbuf_get_from_surface(surface, 0, 0, w, h)
        self.set_from_pixbuf(pixbuf)
        return self


    # rotation has no effect yet
    def rotate(self, deg = 0, point = None):
        w = self.width
        h = self.height
        surface = cairo.ImageSurface(cairo.Format.ARGB32, w, h)
        ctx = cairo.Context(surface)
        Gdk.cairo_set_source_pixbuf(ctx, self.pixbuf, 0, 0)

        # centre of rotation
        if point is None:
            point = (w / 2, h / 2)

        ctx.translate(point[0], point[1])
        ctx.rotate((deg / 180) * math.pi)

        ctx.paint()

        pixbuf = Gdk.pixbuf_get_from_surface(surface, 0, 0, w, h)

        self.set_from_pixbuf(pixbuf)
        return self
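The "rotation has no effect" note matches how cairo behaves: a source pattern set with Gdk.cairo_set_source_pixbuf is locked to the user space in effect at set-source time, so translate and rotate calls issued afterwards no longer move it. A sketch of the usual fix, keeping the names used in the method above (not part of this commit):

    def rotate(self, deg = 0, point = None):
        w, h = self.width, self.height
        surface = cairo.ImageSurface(cairo.Format.ARGB32, w, h)
        ctx = cairo.Context(surface)

        if point is None:
            point = (w / 2, h / 2)

        # transform BEFORE setting the source, and shift back so the
        # pixbuf stays anchored on the rotation point
        ctx.translate(point[0], point[1])
        ctx.rotate(deg / 180 * math.pi)
        ctx.translate(-point[0], -point[1])
        Gdk.cairo_set_source_pixbuf(ctx, self.pixbuf, 0, 0)
        ctx.paint()

        self.set_from_pixbuf(Gdk.pixbuf_get_from_surface(surface, 0, 0, w, h))
        return self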

@@ -10,28 +10,45 @@ class ImageButton(Gtk.Button):
    def __init__(self, filepath, width = 26, height = 26):
        Gtk.Button.__init__(self)

        self.width = width
        self.height = height
        self._image_path = None

        self.set_name('ImageButton')
        self.set_size_request(width, height)

        image = ScaleImage(filepath)
        image.resize(width, height)
        self.set_valign(Gtk.Align.CENTER)

        # a macOS-specific tweak; it only fixes the border in the normal state, the hover style is still there
        self.set_relief(Gtk.ReliefStyle.NONE)

        self.set_image(image)
        self.set_image(filepath)

        css_provider = Gtk.CssProvider()
        style = f"""
            #ImageButton, #ImageButton:hover {{
            #ImageButton {{
                border: 0;
                border-radius: 50%;
                background-color: transparent;
                border-color:transparent;
                outline: transparent;
            }}
            #ImageButton:hover {{
                background-color: rgba(255,255,255,.1);
            }}
        """
        css_provider.load_from_data(style.encode('UTF-8'))

        context = self.get_style_context()
        path = context.get_path()
        context.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)

    def set_image(self, filepath):
        if self._image_path == filepath:
            return

        self._image_path = filepath
        image = ScaleImage(filepath)
        image.resize(self.width, self.height)
        Gtk.Button.set_image(self, image)
        return self
@@ -28,7 +28,8 @@ class Slider(Gtk.Scale):
                background-color: rgba(163, 190, 140, 0.75);
            }}
            #Slider slider {{
                background-color: rgba(163, 190, 140, 0.75);
                background-color: transparent;
                border-color: transparent;
                outline: none;
            }}
        """

ui/text.py (12 changes)

@@ -5,19 +5,17 @@ gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class TextBox(Gtk.EventBox):
class TextBox(Gtk.Box):
    def __init__(self, width, height):
        Gtk.EventBox.__init__(self)
        Gtk.Box.__init__(self)

        self.set_size_request(width, height)

        self.label = Gtk.Label()

        align = Gtk.Alignment(xalign=0.5, yalign=0.5)

        self.add(self.label)
        self.add(align)



    def set_text(self, string):
        self.label.set_text('孙晓 - 丹歌惊鸿')
        self.label.set_text('孙晓 - 丹歌惊鸿')
        return self
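Gtk.Alignment, used here to centre the label, has been deprecated since GTK 3.14; the label's own alignment properties do the same job. A sketch under that assumption (not part of this commit):

    self.label = Gtk.Label()
    self.label.set_halign(Gtk.Align.CENTER)
    self.label.set_valign(Gtk.Align.CENTER)
    self.pack_start(self.label, True, True, 0)   # expand so the centring has room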
(binary image assets changed: 8 images removed, 2 added, 1 replaced)
window.py (170 changes)

@@ -12,17 +12,23 @@ from ui.image import ScaleImage
from ui.slider import Slider
from ui.image_button import ImageButton
from ui.text import TextBox
from ui.ctrl_box import CtrlBox




class SonistWindow(Gtk.Window):
    def __init__(self):
    def __init__(self, app):
        Gtk.Window.__init__(self)

        self.app = app

        self.connect("destroy", self.quit)

        self.set_name('SonistWindow')
        self.set_default_size(320, 384)
        self.set_resizable(False)
        self.set_wmclass('Sonist', 'Sonist')

        self.set_opacity(0.9)
@@ -32,8 +38,6 @@ class SonistWindow(Gtk.Window):
        # album_img = './usr/share/sonist/album.png'
        album_img = './usr/share/sonist/avatar.jpg'

        self.connect("destroy", self.all_quit)

        self.set_background_image(album_img)

        layout = Gtk.Layout()
@@ -44,57 +48,48 @@

        # the record disc
        disk = ScaleImage('./usr/share/sonist/disk.png')
        handler = ScaleImage('./usr/share/sonist/handler.png')
        album = ScaleImage(album_img)

        disk.resize(192, 192)
        album.resize(128, 128)
        album.set_radius(64)
        album.resize(128, 128).set_radius(64)

        handler.resize(48, 96)
        self.handler = handler
        self.album = album

        box = Gtk.Fixed()
        box.put(disk, 0, 0)
        box.put(album, 32, 32)
        box.put(disk, 16, 16)
        box.put(album, 48, 48)
        box.put(handler, 0, 16)

        layout.put(box, 64, 32)
        layout.put(box, 48, 16)


        # title

        title_box = TextBox(256, 20)
        title_box.set_text('孙晓 - 丹歌惊鸿')
        # self.title_box = TextBox(256, 20)
        self.title_box = Gtk.Label()
        self.title_box.set_text('孙晓 - 丹歌惊鸿')

        layout.put(title_box, 32, 244)
        layout.put(self.title_box, 32, 244)


        # playback progress
        slider = Slider()
        layout.put(slider, 32, 270)
        slider = Slider(272)
        layout.put(slider, 24, 270)


        # control bar
        ctrl_box = Gtk.Box(spacing = 6)
        all = ImageButton('./usr/share/sonist/all.png')
        # rand = ImageButton('./usr/share/sonist/rand.png')
        prev = ImageButton('./usr/share/sonist/prev.png')
        pause = ImageButton('./usr/share/sonist/pause.png', 48, 48)
        next = ImageButton('./usr/share/sonist/next.png')
        volume = ImageButton('./usr/share/sonist/volume.png')
        self.ctrl_box = CtrlBox()
        self.ctrl_box.connect('clicked', self.ctrl_clicked)

        ctrl_box.pack_start(all, True, True, 0)
        # ctrl_box.pack_start(rand, True, True, 0)
        ctrl_box.pack_start(prev, True, True, 0)
        ctrl_box.pack_start(pause, True, True, 0)
        ctrl_box.pack_start(next, True, True, 0)
        ctrl_box.pack_start(volume, True, True, 0)

        layout.put(ctrl_box, 48, 300)




        layout.put(self.ctrl_box, 48, 300)

        self.add(layout)

        self.show_all()
        self.sync_state(True)



    def set_background_image(self, filepath):
@@ -117,7 +112,110 @@
        context.add_provider_for_screen(screen, css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)


    def ctrl_clicked(self, box, btn):
        match(btn):
            case 'play_btn':
                self.toggle_play()

            case 'mode_btn':
                # repeat all
                if self.ctrl_box.curr_mode == 0:
                    self.app.mpd.repeat(1)
                    self.app.mpd.random(0)
                    self.app.mpd.single(0)
                # random
                elif self.ctrl_box.curr_mode == 1:
                    self.app.mpd.repeat(0)
                    self.app.mpd.random(1)
                    self.app.mpd.single(0)
                # single
                else:
                    self.app.mpd.repeat(0)
                    self.app.mpd.random(0)
                    self.app.mpd.single(1)

            case 'prev_btn':
                self.prev_song()

            case 'next_btn':
                self.next_song()

            case 'vol_btn':
                self.toggle_play()

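The mode button cycles CtrlBox.curr_mode through 0, 1 and 2, and the branch above maps each value onto MPD's repeat/random/single flags. Summarised as a small sketch (the PLAY_MODES name is illustrative, not part of the commit):

    # assumed summary of the mapping ctrl_clicked applies via self.app.mpd
    PLAY_MODES = {
        0: {'repeat': 1, 'random': 0, 'single': 0},  # repeat the whole playlist
        1: {'repeat': 0, 'random': 1, 'single': 0},  # shuffle
        2: {'repeat': 0, 'random': 0, 'single': 1},  # single-track mode
    }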

    def toggle_play(self):
        if self.stat.get('state') == 'stop':
            self.app.mpd.play()
        else:
            self.app.mpd.pause()

        self.sync_state()
        self.update_play_stat(self.stat.get('state') == 'play')


    def prev_song(self):
        self.app.mpd.previous()
        self.sync_state()

    def next_song(self):
        self.app.mpd.next()
        self.sync_state()


    def update_play_stat(self, played = True):
        if played:
            self.handler.reset('./usr/share/sonist/handler_a.png')
        else:
            self.handler.reset('./usr/share/sonist/handler.png')

        # toggle the play button state
        self.ctrl_box.toggle_play_btn(played)


    def sync_state(self, first = False):
        self.stat = self.app.mpd.status()

        played = self.stat.get('state')
        song = self.app.mpd.currentsong()

        if first:
            self.update_play_stat(played == 'play')

        if self.stat.get('single') == '1':
            self.ctrl_box.toggle_mode_btn(mode = 'single')
        elif self.stat.get('random') == '1':
            self.ctrl_box.toggle_mode_btn(mode = 'random')

        # update the song info
        self.title_box.set_text("%s - %s" % (song.get('artist'), song.get('title')))

        filepath = f"./album/{song['title']}.png"

        # print(self.stat)
        print(song)


        if os.path.isfile(filepath):
            self.update_album(filepath)
        else:
            buff = self.app.get_cover(song, filepath, self.update_album)
            # print(buff)
            # with open(filepath, 'wb') as file:
            #     output = file.write(buff)

            # self.set_background_image(filepath)



        # print(self.app.mpd.readpicture(self.stat.get('songid')))


    def update_album(self, filepath):
        self.set_background_image(filepath)
        self.album.reset(filepath).set_radius(64)

    def quit(self, win):
        self.app.remove_window(self)

    def all_quit(self, win):
        print('朕要休息了~~~')
        Gtk.main_quit()