initial commit

This commit is contained in:
Tyrel Souza 2022-10-16 21:59:22 -04:00
commit 2ac1fc332a
44 changed files with 1368 additions and 0 deletions

.gitignore vendored Normal file

@ -0,0 +1,168 @@
tags
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# End of https://www.toptal.com/developers/gitignore/api/python

Makefile Normal file

@ -0,0 +1,89 @@
PY?=
PELICAN?=pelican
PELICANOPTS=
BASEDIR=$(CURDIR)
INPUTDIR=$(BASEDIR)/content
OUTPUTDIR=$(BASEDIR)/output
CONFFILE=$(BASEDIR)/pelicanconf.py
PUBLISHCONF=$(BASEDIR)/publishconf.py
SSH_HOST=localhost
SSH_PORT=22
SSH_USER=tyrel
SSH_TARGET_DIR=/Users/tyrel/pelican-site
DEBUG ?= 0
ifeq ($(DEBUG), 1)
PELICANOPTS += -D
endif
RELATIVE ?= 0
ifeq ($(RELATIVE), 1)
PELICANOPTS += --relative-urls
endif
SERVER ?= "0.0.0.0"
PORT ?= 0
ifneq ($(PORT), 0)
PELICANOPTS += -p $(PORT)
endif
help:
	@echo 'Makefile for a pelican Web site'
	@echo ''
	@echo 'Usage:'
	@echo '   make html                           (re)generate the web site'
	@echo '   make clean                          remove the generated files'
	@echo '   make regenerate                     regenerate files upon modification'
	@echo '   make publish                        generate using production settings'
	@echo '   make serve [PORT=8000]              serve site at http://localhost:8000'
	@echo '   make serve-global [SERVER=0.0.0.0]  serve (as root) to $(SERVER):80'
	@echo '   make devserver [PORT=8000]          serve and regenerate together'
	@echo '   make devserver-global               regenerate and serve on 0.0.0.0'
	@echo '   make ssh_upload                     upload the web site via SSH'
	@echo '   make sftp_upload                    upload the web site via SFTP'
	@echo '   make rsync_upload                   upload the web site via rsync+ssh'
	@echo ''
	@echo 'Set the DEBUG variable to 1 to enable debugging, e.g. make DEBUG=1 html'
	@echo 'Set the RELATIVE variable to 1 to enable relative urls'
	@echo ''

html:
	"$(PELICAN)" "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(CONFFILE)" $(PELICANOPTS)

clean:
	[ ! -d "$(OUTPUTDIR)" ] || rm -rf "$(OUTPUTDIR)"

regenerate:
	"$(PELICAN)" -r "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(CONFFILE)" $(PELICANOPTS)

serve:
	"$(PELICAN)" -l "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(CONFFILE)" $(PELICANOPTS)

serve-global:
	"$(PELICAN)" -l "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(CONFFILE)" $(PELICANOPTS) -b $(SERVER)

devserver:
	"$(PELICAN)" -lr "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(CONFFILE)" $(PELICANOPTS)

devserver-global:
	$(PELICAN) -lr $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS) -b 0.0.0.0

publish:
	"$(PELICAN)" "$(INPUTDIR)" -o "$(OUTPUTDIR)" -s "$(PUBLISHCONF)" $(PELICANOPTS)

ssh_upload: publish
	scp -P $(SSH_PORT) -r "$(OUTPUTDIR)"/* "$(SSH_USER)@$(SSH_HOST):$(SSH_TARGET_DIR)"

sftp_upload: publish
	printf 'put -r $(OUTPUTDIR)/*' | sftp $(SSH_USER)@$(SSH_HOST):$(SSH_TARGET_DIR)

rsync_upload: publish
	rsync -e "ssh -p $(SSH_PORT)" -P -rvzc --include tags --cvs-exclude --delete "$(OUTPUTDIR)"/ "$(SSH_USER)@$(SSH_HOST):$(SSH_TARGET_DIR)"

.PHONY: html help clean regenerate serve serve-global devserver devserver-global publish ssh_upload sftp_upload rsync_upload

content/.DS_Store vendored Normal file

Binary file not shown.


@ -0,0 +1,42 @@
Python Progress Bar
###################
:date: 2011-12-21 03:52
:author: tyrel
:category: Python
:slug: python-progress-bar
:status: published
I was looking for a nice progress bar today at work. Rather than just printing “\ **Waiting 30 seconds…**\ ” and having the script appear to do nothing, I wanted to show a progress bar.
I found a progress bar from `Corey Goldberg <http://code.google.com/p/corey-projects/source/browse/trunk/python2/progress_bar.py>`__
I did make a couple changes, and have uploaded my changes to my GitHub account.
newPythonProgressBar [deadlink]
This progress bar is very easy to use:
.. code:: python

    # To Setup
    from progress_bar import ProgressBar
    import sys
    import time

    def updateBar(step):
        p.update_time(step)
        sys.stdout.write("%s\r" % p)
        sys.stdout.flush()

    #
    # to call
    #
    wait_time = 100  # seconds
    p = ProgressBar(wait_time)
    p.empty_char = "."
    p.unit = "^"
    for step in range(wait_time + 1):
        updateBar(step)
        time.sleep(1)
It will look like this when you use it:
``[###...............7%..................] 7^/100^``


@ -0,0 +1,24 @@
Custom Django URLField
######################
:date: 2012-01-05 03:55
:author: tyrel
:category: Python
:slug: custom-django-urlfield
:status: published
For work I had to write a custom url model field. When being set up, this model field accepts a default protocol and a list of other protocols.
| When checking the protocol, the url is split by “://”. If the split has one or two parts, then the url is validly formed.
| In the event of a single element split, there is no protocol specified. When there is no protocol, the url is prepended with the default protocol specified. If there is a protocol, it is checked to make sure it exists in a union of the default protocol and other protocols. If it is not, a ValidationError is raised letting the user know that the protocol is not accepted.
This can all be found at On my github [deadlink].
| I have a couple ways I could have done this better, and probably will. One improvement would be a single ``protocols`` parameter, checked to contain at least one element; then, when a url has no protocol specified, the first element is used as the default.
| This would be a little cleaner.
| This example would allow for http, https, ssh, spdy and mailto; anything else would error out:
| ``facebook_page = URLField(default_protocol="http",``
| ``protocols=["https","ssh","spdy","mailto"])``
| The way I could improve this would be
| ``facebook_page = URLField(protocols=["http","https","ssh","spdy","mailto"])``
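For illustration, a minimal sketch of the idea (all names and details below are assumptions, not the original code):

.. code:: python

    from django.core.exceptions import ValidationError
    from django.db import models


    class URLField(models.CharField):
        # A CharField that checks the protocol against an allowed set,
        # prepending the default protocol when none is given.

        def __init__(self, default_protocol="http", protocols=None, **kwargs):
            self.default_protocol = default_protocol
            self.protocols = set(protocols or []) | {default_protocol}
            kwargs.setdefault("max_length", 200)
            super(URLField, self).__init__(**kwargs)

        def clean(self, value, model_instance):
            parts = value.split("://")
            if len(parts) == 1:
                # No protocol specified, so prepend the default.
                value = "%s://%s" % (self.default_protocol, value)
            elif parts[0] not in self.protocols:
                raise ValidationError(
                    "The protocol %r is not accepted." % parts[0])
            return super(URLField, self).clean(value, model_instance)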


@ -0,0 +1,41 @@
You can un-expire a GPG key.
############################
:date: 2012-01-13 03:54
:author: tyrel
:category: Linux
:slug: you-can-un-expire-a-gpg-key
:status: published
Today we had a problem at work on a system. Without getting into too much detail, so as not to give away secrets covered by the verbal NDA I am under, I will just say that it had to do with a GPG public key of mine that was expired on a dev machine accidentally propagating during install to a production machine.
This key had a sub key as well, so figuring this out was tricky.
To start, you can list your gpg keys like so
``gpg --list-keys``
| This will list keys such as
| ``pub 4096R/01A53981 2011-11-09 [expires: 2016-11-07]``
| ``uid         Tyrel Anthony Souza (Five year key for email.)``
| ``sub 4096R/C482F56D 2011-11-09 [expires: 2016-11-07]``
| To make this not expire (the same steps work to change the expiration date to another time), you must first edit the key
| ``gpg --edit-key 01A53981``
| You will then see a gpg prompt
``gpg>``
| Type “expire” in and you will be prompted for how long to change it to
| ``Changing expiration time for the primary key.``
| ``Please specify how long the key should be valid.``
| ``        0 = key does not expire``
| ``      <n> = key expires in n days``
| ``     <n>w = key expires in n weeks``
| ``     <n>m = key expires in n months``
| ``     <n>y = key expires in n years``
| You are then done setting the expiration on the primary key. If you have a sub key, changing it is as easy as typing
| ``key 1``
| and repeating the expiration step.
To finish and wrap things up, type ``save`` and you are done.


@ -0,0 +1,25 @@
Some BASH tips
##############
:date: 2012-03-08 03:56
:author: tyrel
:category: Linux
:slug: some-bash-tips
:status: published
I realize I haven't updated in a while. I haven't had much free time recently, as I've been working on a project for my father in C# after work hours. This is a great change from only working in Python and JavaScript recently. I'm making a program that will analyze test results from a plasma torch for a company called HyperTherm. My father built the physical machine, but the employees want something that lets them better see the results of a passed torch unit, or a failed torch unit. This program has a bar code scanner that scans the tool used in the test and matches it up to the lot of torch parts. Another added feature is the ability to print a white label that says “UNIT PASSED” or a giant red label that says the unit failed and which of the 8 tests failed. I had to learn how to use delegates, as my serial event listener is on a separate thread and I can't update labels, or parts of the User Interface, without them. Still working on it, hopefully will wrap it up by Saint Patrick's day.
I recently found a cool command in BASH that I hadn't previously known. C-o will execute the current line, and then bring the following line up from BASH history. If you have a set of commands you want to execute again, rather than having to press up 20 times, hit enter, press up 19 times, hit enter, and so on… you can just press up 20 times once, then press C-o as many times as you need to.
| For example
.. code:: console
tacidsky@ip-234:~$ touch a
tacidsky@ip-234:~$ touch b
tacidsky@ip-234:~$ touch c
# [up] [up] [up]
tacidsky@ip-234:~$ touch a [C-o]
tacidsky@ip-234:~$ touch b [C-o]
tacidsky@ip-234:~$ touch c [C-o]
As you can see there, all I had to do was go back to the \`touch a\` line, and hit control-o three times and it touched the files again!


@ -0,0 +1,28 @@
Harry Delmolino
###############
:date: 2012-05-25 03:56
:author: tyrel
:category: Personal
:slug: harry-delmolino
:status: published
I met this random kid online on IRC a year and a half ago (I believe it was December 19th, 2010). His name was HarryD. We got talking, and one time he mentioned that he was going to hike Mount Monadnock. That is near me, so we kept talking, and he said he lived near Northampton, MA. It was cool that I met some random kid who lived near me. He was only 17 at the time.
Eventually we met up, because my new girlfriend lived near NoHo, in Holyoke. One day I went down to see her and met up with him. He was wearing all brown, so I joked that he was a UPS delivery guy. We hung out for a bit; I think I met his friend Sam that day. Then I left and went home. For the next year we would hang out occasionally. Usually I would go down and visit NoHo, because it was easy for him to bike to and he worked there. We went bowling once or twice, and he came up for the Super Bowl to watch Star Wars instead, because screw sports!
I think that was the last time I saw him.
On Saturday, May 19th, 2012, he was riding his bicycle in Northampton and collided with a car, sending him flying off and smacking his head on the edge of the curb. There are some other details which I am not 100% sure about. He was then in the hospital until Tuesday, May 22, 2012, at which time he passed away.
We used to talk every day, either on IRC, AIM or gTalk. His account on IRC (no_numbers_here) is still active because his VPS for http://harry.is is still paid for and online until the next billing period. It's so sad seeing his name online and not being able to send a “hey dood” or some other random greeting, then start talking about computers, python, bikes, etc.
Harry was an avid cyclist. I am reading stories that even if he had to go thirty feet, he would hop on his bike to get there. He got me interested in cycling as well. He was going to sell me a bike, but then I was talking to a friend and he gave me one, so I never bought it. Which, as it turns out, was good, as he wanted to give that bike to his new girlfriend.
I was planning on hanging out with him next weekend, as he was busy with something this weekend that I can't remember. I wanted to take him target shooting, and wanted to eventually be in enough shape to go hiking with him. None of this ever came to fruition.
Harry Delmolino, we may not have been as close as we could have been, but you definitely made a difference in my life for the better and I never got to thank you for this.
| Edit:
| I went to the Calling Hours today and I was only there maybe a half hour, but there were so many people there. It's amazing that a man so young touched the lives of so many people, and had accomplished so much. People might say “Oh he was just a kid, he was only 18” but if they looked at the accomplishments of this young man, they would realize how grown up he was.
I think his mother and father might eventually delete his Facebook, so I went back and took a screenshot of one of our last conversations so I can remember. Everything he said usually warranted a laugh.


@ -0,0 +1,42 @@
CFEngine3 Install on CentOS 5.7
###############################
:date: 2012-11-07 03:57
:author: tyrel
:category: outdated
:slug: cfengine3-install-on-centos-5-7
:status: published
| Today I was tasked with installing CFEngine3 on CentOS-5.7 (a little outdated). When installing CFEngine-3.3.1 I kept getting an error that it couldn't find libtokyocabinet.so.9. I had to set my prefix to /usr/ because the location tokyocabinet was installing my libraries to was not being read by CFEngine's make script.
| To do this (I am using tokyocabinet 1.4.47)
.. code:: console
wget http://fallabs.com/tokyocabinet/tokyocabinet-1.4.47.tar.gz
tar -xzvf tokyocabinet-1.4.47.tar.gz
cd tokyocabinet-1.4.47/
./configure --prefix=/usr/
make
sudo make install
Then I was able to ./configure && make && make install cfengine3 without any problems.
So my overall script looked something like this:
.. code:: console
wget http://fallabs.com/tokyocabinet/tokyocabinet-1.4.47.tar.gz
tar -xzvf tokyocabinet-1.4.47.tar.gz
cd tokyocabinet-1.4.47/
./configure --prefix=/usr/
make
sudo make install
.. code:: console
wget http://cfengine.com/source-code/download?file=cfengine-3.3.1.tar.gz
tar -xzvf cfengine-3.3.1.tar.gz
cd cfengine-3.3.1
./configure
make
sudo make install
sudo cp /var/cfengine/bin/cf-* /usr/bin/


@ -0,0 +1,62 @@
Getting started in Python Part 1
################################
:date: 2013-07-02 03:59
:author: tyrel
:category: Python
:slug: getting-started-in-python-part-1
:status: published
I have a friend who is interested in becoming a Python developer. He has some Python experience with CodeAcademy, but he of course wants to take this a step further and develop on his own computer. I figured I'd give him a few pointers, and I know this has been rehashed a million times, but what the hell, why not blog on it again.
There are a few important things to learn besides the actual language itself. The first I am going to discuss has to do with installing packages, followed closely by Python's path trickery. Finally I'm going to wrap up by discussing some development software that could be used for any language, but that I use daily in my work as a Python Software Engineer. Let's get started.
PIP
---
Python is a wonderful language, but how useful would it be if you had to rewrite everything by hand? Not useful at all. That's why the lovely pip developers were born. `PIP <https://pypi.python.org/pypi/pip>`__ (executable pip) is a package manager written for Python. It's very simple to use, and in my opinion is way better than easy_install. To use pip you need to know at a minimum three commands.
| *pip install*
| This command does exactly what it says on the box. It queries PyPI (Python Package Index) and downloads the latest version of the package on the server. It then installs it to your site-packages.
| *pip uninstall*
| This deletes all files associated with the package supplied. 100% simple.
| *pip freeze*
| This shows what packages are installed on your system and what versions. If you supply ``--local`` it will show what packages are installed in your current environment.
| These three commands will get you started with package management; there are more commands you can find by looking through the help documents.
Virtualenv
----------
| If you notice I mentioned a current environment in my previous *pip freeze* explanation, here is why. Python has a default place that it looks when you reference a package. This is generally in something like **/usr/lib/python2.7/site-packages/** or **C:\\Python27\\lib\\**. There is a set of scripts called *virtualenv* that creates an environment with a complete copy of your Python executable and a blank (unless you copy them over) site-packages directory. You can then install any packages there after you activate the virtual environment. When activated, you use those specific versions, no matter what version is installed on your system.
| Let's show an example of the first-time use of *virtualenv*:
.. code:: console
$ sudo pip install virtualenv # Only time you might need sudo, try without first.
$ virtualenv myenv # Create the virtual environment
$ source myenv/bin/activate # Activate the virtual environment
(myenv)$ python -c "import MYPACKAGE; print MYPACKAGE"
Notice how it says your package is not in **/usr/lib/python2.7/site-packages/**? That's because you're using *virtualenv* to tell your copied python to use that library instead. There are many reasons you would want to use a virtual environment. The most frequent reason is to preserve version numbers of installed packages between a production and a development environment. Another reason virtualenv is useful is when you do not have the power to install packages on your system; you can create a virtual environment and install them there.
Virtualenvwrapper
-----------------
After you create a virtual environment, you just run *source bin/activate* and it will activate the virtual environment. It can get tedious knowing exactly where your virtual environments are all the time, so some developers wrote some awesome scripts to fix that problem. This is called *virtualenvwrapper*, and once you use it once, you will always want to use it more. It has you create a hidden directory in your home directory, sets that to an environment variable, and references that directory as the basis for your virtual environments. The installation of this is pretty easy: you can *pip install virtualenvwrapper* if you want, or download the package and compile by hand. Once installed correctly, you can run the command *mkvirtualenv envname* to create a virtual environment. You can then run *workon envname* from anywhere, and it will activate that environment. For example, you could be at **/var/www/vhosts/www.mysite.com/django/** and run *workon envname* and it would activate the environment from there. This isn't a required package (none of them are really…) as I went a couple years without using *virtualenvwrapper*, but it is very useful and now I use it every day. One tip from my setup of *virtualenvwrapper* is that I use the postactivate scripts to automatically change into the proper project directory of my environment. This also means I usually name my *virtualenv* after my project name for easy memory. It makes no sense to have a project called “cash_register” but the *virtualenv* be called “fez”. This is how I change to the right project after activating my *virtualenv*. This goes in **$WORKON_HOME/postactivate**:
.. code:: bash

    #!/bin/bash
    # This hook is run after every virtualenv is activated.
    # is it a work or a personal project? (Example)
    proj_name=$(echo $VIRTUAL_ENV | awk -F'/' '{print $NF}')
    if [[ -e "/Users/tsouza/PersonalProjects/$proj_name" ]]
    then
        cd ~/PersonalProjects/$proj_name
    else
        cd ~/WorkProjects/$proj_name
    fi
This about wraps up part one of this two part blog series. Next time I will discuss how to use Git and how to configure SublimeText2 and Aptana Studio for use with Python. Stay tuned!


@ -0,0 +1,31 @@
Help, I have too many Django ManyToMany Queries [FIXED]
#######################################################
:date: 2013-08-06 04:00
:author: tyrel
:category: Python
:slug: help-i-have-too-many-django-manytomany-queries-fixed
:status: published
My boss tasked me with getting the load time of 90 seconds (HOLY CARP!) on one page down. First thing I did was install the Django Debug Toolbar to see what was really happening.
There are currently 2,000 users in the database. The way our model is set up, a UserProfile can have other UserProfiles attached to it in one of three M2M relations, which in the Django Admin would cause 2,000 queries PER M2M field. This is very expensive, as you obviously don't want 10,000 queries, each taking 0.3ms, to take place.
The solution, after a day and a half of research is to override the **formfield_for_manytomany** method in the Admin class for our UserProfile object.
Our solution is to prefetch for any M2M that are related to the current Model.
.. code:: python

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        if db_field.__class__.__name__ == "ManyToManyField" and \
                db_field.rel.to.__name__ == self.model.__name__:
            kwargs['queryset'] = db_field.rel.to.objects.prefetch_related("user")
        return super(UserProfileInline, self).formfield_for_manytomany(
            db_field, request, **kwargs)
This goes inside our admin class **UserProfileInline(admin.StackedInline)**. Simple, clean, and easy to drop into another ModelAdmin with minimal changes.
Other things I pondered were to set all our M2Ms as raw_id_fields, then use Select2 or Chosen to query our UserProfiles when the related users were being selected. This would take a lot of load off the initial page load, but is more of a bandaid than a real fix.
I tried to override the Admin class's **def queryset(self, request):** but this was not affecting anything.


@ -0,0 +1,35 @@
How to not trigger a post_save in Django, but still modify data.
################################################################
:date: 2013-11-13 03:58
:author: tyrel
:category: Django
:slug: how-to-not-trigger-a-post_save-in-django-but-still-modify-data
:status: published
Recently I have been diving into using signals with Django, which of course are pretty neat.
I am working on a website for work which, in the most basic explanation, is a task management site. Recently I added the ability to subscribe to tasks and get emails; I did this by connecting to the post_save signal. I only email out when a task is changed, not created (of course, no one would be subscribed to it yet). This worked flawlessly and “emails” out to anyone who is subscribed. I say that in quotes because I haven't actually hooked it up to a real SMTP server, and only use
.. code:: console
python -m smtpd -n -c DebuggingServer localhost:1025
which will output any emails to stdout. But I digress… A problem arose when I was working on ordering tasks.
I store an integer in the “ordering” column; any authenticated user can drag a row to a new location and that will reorder the task. I did this after I set up the emailing signal, so I didn't think about an email being sent out for EVERY task being changed.
I tried a lot of different things, and was debating some that would be a bit messy. Among those ideas was storing the past values in another table, but that would get expensive fast. The reason I tried this was that I wanted to see if the ordering was the only thing that changed, and if that was the case, not send an email. I eventually found a thread on StackOverflow that says to use update on the queryset to not trigger the signal.
You can do this by doing something like this:
.. code:: python

    from app.models import ModelName

    def reorder(request):
        new_order = request.POST.get('new_order', None)
        pk = request.POST.get('modelname_pk', None)
        if new_order:
            ModelName.objects.filter(pk=pk).update(ordering=new_order)
I am not sure if this is the proper way to save changes and not trigger a post_save signal, but this is the way that worked for me, so I figured I would document it.
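For context, the subscription emails hang off a receiver along these lines (a sketch from memory; the model and helper names here are hypothetical):

.. code:: python

    from django.db.models.signals import post_save
    from django.dispatch import receiver

    from app.models import Task  # hypothetical task model
    from app.emails import send_task_email  # hypothetical helper

    @receiver(post_save, sender=Task)
    def email_subscribers(sender, instance, created, **kwargs):
        # Only email on changes; no one is subscribed to a brand new task.
        if created:
            return
        for subscriber in instance.subscribers.all():
            send_task_email(subscriber, instance)

Since ``queryset.update()`` runs a single SQL UPDATE and never calls ``Model.save()``, a receiver like this is never fired for the reordering.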


@ -0,0 +1,40 @@
Readline
########
:date: 2014-06-21 04:01
:author: tyrel
:category: Linux
:slug: readline
:status: published
A lot of times when I stop at someone's computer and help them in the terminal, I use a Readline command and people say “How the heck did you do that?”
Let me first back up and explain what Readline is. From the GNU Readline documentation: “The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in.” By default, Readline is set up in Emacs mode. No, you don't have to have an extra four fingers to use Readline; most of the commands are simple.
Here are a couple of the commands I use daily:
Movement
~~~~~~~~
- To move to the beginning of a line, you press **^a** (`note 1 <#sup1>`__)
- To move to the end of a line you press **^e**
Killing and Yanking
~~~~~~~~~~~~~~~~~~~
- To cut the rest of the line from where your cursor is, to the end, you press **^k**
- To delete one word you press **^w**
- To paste either of the two previous back you can press **^y**
Miscellaneous
~~~~~~~~~~~~~
- To clear the screen and get to a fresh start, you can press **^l**
- To end your session you can send a **^d** (This will send an end of file character)
- To search for a command you typed recently, press **^r** and start typing, it will search backwards. **^r** again will search for an earlier match.
- The inverse of **^r** is **^s**; they function the same.
Finally, don't forget about **^c**. While not specifically Readline, it's very useful because it sends the SIGINT signal to the program, which, if you're just on the command line, will not execute the line you have typed, and will give you a new line with nothing on it. A nice clean start.
To find out a lot more, read the documentation at `the Readline Commands Docs <http://www.gnu.org/software/bash/manual/html_node/Bindable-Readline-Commands.html>`__. I even learned some things while writing this up; apparently pressing **^x $** will list off all the possible usernames. Good to know, and good to always keep learning.
*1* (`back <#sup1-back>`__) *Also, if you are unaware, the convention I use is ^ to represent Control; the documentation on Readline uses C- to represent the same thing.*


@ -0,0 +1,24 @@
SSH Agent on “boot”
###################
:date: 2015-01-09 04:03
:author: tyrel
:category: Linux
:slug: ssh-agent-on-boot
:status: published
I had a friend complain that he had to keep adding his ssh key to his ssh-agent every time he rebooted. I have a really easy bit of shell code you can put into your .bashrc or your .zshrc file:
.. code-block:: bash

    SSHKEYFILE=/path/to/your/ssh/key/file
    ssh-add -l | grep -q $SSHKEYFILE
    RC=$?
    if [ $RC -eq 1 ]
    then
        ssh-add -t 86400 $SSHKEYFILE
        echo "Added key internal to keychain."
    fi
This will check, every time your bash or zsh rc file is sourced, whether your key is among the currently added keys, and if it's not, it will add it.
This has the benefit of not requiring you to put in your password every time you connect to a remote machine, if you password your ssh keys (which you should).


@ -0,0 +1,9 @@
Python Debugger
###############
:date: 2015-01-13 04:02
:author: tyrel
:category: Python
:slug: python-debugger
:status: published
When I worked at Propel Marketing, my dev team used to have presentations on things they loved. I love the Python debugger. It's very useful, and I believe a proper understanding of how to use a debugger will make you a better programmer. Here is a presentation on the debugger I made for my team: https://prezi.com/cdc4uyn4ghih/python-debugger/
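If you have never used it, here is the barest possible taste (a quick sketch, not from the presentation):

.. code:: python

    import pdb

    def average(values):
        total = 0
        for value in values:
            total += value
        pdb.set_trace()  # execution pauses here; inspect total, step with n/s/c
        return total / len(values)

    print(average([1, 2, 3]))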


@ -0,0 +1,19 @@
Too many open files
###################
:date: 2015-01-28 04:02
:author: tyrel
:category: Python
:slug: too-many-open-files
:status: published
When I worked at Propel Marketing, we used to outsource static websites to a third party vendor, and then host them on our server. It was our job as developers to pull down the finished website zip file from the vendor, check it to make sure they used the proper domain name (they didn't, a lot of the time), and make sure it actually looked nice. If these few criteria were met, we could launch the site.
Part of this process was SCPing the directory to our sites server. The sites server was where we had Apache running with every custom static site as a vhost. We would put the website in /var/www/vhosts/domain.name.here/ and then create the proper files in sites-available and sites-enabled (more on this in another entry). After that the next step was to run a checkconfig and restart Apache.
Here's where it all went wrong one day. If I recall correctly, my boss was on vacation, so he had me doing a bit of extra work and launching a few more sites than I usually do. Not only that, but we also had a deadline of the end of the month, which was either the next day or the day after. I figured I'll just set up all mine for two days, and then have some extra time the next day for other things to work on. So I started launching my sites. After each one, I would add the domain it was supposed to be at into my /etc/hosts file and make sure it worked.
I was probably half way done with my sites when suddenly I ran into one that didn't work. I checked another one to see if maybe it was just my network being stupid and not liking my hosts file, but no, that wasn't the problem. Suddenly, EVERY SITE stopped working on this server. Panicking, I deleted the symlink in sites-enabled and restarted Apache. Everything worked again. I then proceeded to put that site aside; maybe something in the php files breaks the server, who knows, but I had other sites I could launch.
I set up the next site and the same thing happened again: no sites worked. Okay, now it's time to freak out and call our sysadmin. He didn't answer his phone, so I called my boss JB. I told him the problem and he said he would reach out to the sysadmin and see what the problem was, all the while I'm telling JB “It's not broken broken, it just doesn't work, it's not my fault” etc etc. A couple hours later, our sysadmin emailed us back and said he was able to fix the problem.
It turns out, there's a hard limit to the number of files your system can have open per user, and this was set to 1000 for the www-data user. The site I launched was coincidentally the 500th site on that server, each of them having an access.log and an error.log. These two files are apparently constantly open on each site for Apache to log to. He was able to raise www-data's ulimit a lot higher (I don't recall now what it was), and that gave a lot more leeway in how many sites the sites server could host.
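If you ever want to check that limit from Python on a Linux box rather than digging for ``ulimit -n``, a quick sketch:

.. code:: python

    import resource

    # RLIMIT_NOFILE is the per-process cap on open file descriptors.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print("soft=%d hard=%d" % (soft, hard))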


@ -0,0 +1,11 @@
Hello, world!
#############
:date: 2021-10-31 19:10
:author: tyrel
:category: Hello
:slug: hello-world
:status: published
This blog here I want to keep some permanent tech thoughts in more written form than my `Wiki <https://tyrel.website/wiki/>`__ where I keep the majority of my written work. I do have a `flight blog <https://k3tas.radio/airband>`__, which has a lot of customization for maps and such that I don't want on here.
I have revived some old blog posts from previous blogs, to give some life to this blog while I'm writing new articles.


@ -0,0 +1,36 @@
Python3 GitHub CLI tool as a refresher
######################################
:date: 2021-11-04 01:29
:author: tyrel
:category: Python
:slug: python3-github-cli-tool-as-a-refresher
:status: published
It's no lie that I love terminals. I wish I could live on a terminal and never really need to see a GUI application again.
Last night I migrated a lot of my old code from one GitLab account to another (`tyrelsouza <https://gitlab.com/tyrelsouza>`__ to `tyrel <https://gitlab.com/tyrel>`__) in an effort to clean up some of my usernames spread across the world. While doing that I noticed my `django-dbfilestorage <https://gitlab.com/tyrel/django-dbfilestorage>`__ Python module that has been sitting and rotting for three years. I played around a little bit in order to port it to Python 3.9, but I ran into some base64 errors. I tried a little bit but it was late and I couldn't figure it out. My conclusion is that I have been away from Python for too long, so the little things - that I knew and loved - had fallen away. I mentioned this to my friend Alex and he said *"make a barebones github cli (readonly?) with issue viewer, and stats display"*. I've embarked on a journey to refresh my Python well enough to repair DBFS.
.. figure:: {static}/images/2021-11-04_alex_prompt.png
:alt: Me: "okay python frioends, what should I make as a quick refresher into the Python world?" alex: "maybe: barebonx github cli (reasdonly?) with issue viewer and stats display"
I knew I wanted to use ``httpx`` as my network client library; it's new, fast, and I have a couple friends who work on it. I started with a barebones ``requirements.in`` file and tossed in ``invoke``, ``pytest``, and ``black``. From there I used ``pip-compile`` to generate my ``requirements.txt`` (a tip I picked up recently while adding Pip-Compile support to the `Tidelift CLI <https://tidelift.com/cli>`__) and I was good to go.
The `docs for the GitHub API <https://docs.github.com/en/rest/overview/resources-in-the-rest-api#schema>`__ are pretty easy to read, so I knew all I really needed to do was set my ``Accept`` header to be Version3 and I could view the schema. With the schema saved to a ``.json`` file, I then wrote a ``GHub`` class to pull this data down using ``httpx.client.Client.get``, super simple! The only two endpoints I care about right now are the user and repos endpoints, so I made two ``get_`` functions for each. After a little bit of work - the super intricate details of which I won't bore you with - I have a functional cli.py file. For now, the only interaction is a prompt from ``rich`` for a username, and then you get a fancy table (also from ``rich``) of the first page of results of repos, stargazer/watcher/fork counts, and a description.
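The class ended up shaped roughly like this (a reconstruction, not the exact repo code):

.. code:: python

    import httpx

    class GHub:
        def __init__(self) -> None:
            self.client = httpx.Client(
                base_url="https://api.github.com",
                headers={"Accept": "application/vnd.github.v3+json"},
            )

        def get_user(self, username: str) -> dict:
            response = self.client.get(f"/users/{username}")
            response.raise_for_status()
            return response.json()

        def get_repos(self, username: str) -> list:
            response = self.client.get(f"/users/{username}/repos")
            response.raise_for_status()
            return response.json()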
.. figure:: {static}/images/2021-11-04-01_prompting_and_table.png
:alt: Prompting for the username and showing my table of repositories.
Prompting for the username and showing my table of repositories.
It was a fun evening of learning what's changed in Python3 since I last touched it, especially as I've spent the majority of my career in Python2.7. Type annotations are super awesome. I'll probably pick it up again once I get some more free time later in the week. It's also nice blog fodder! I already have a million things I want to do next - pagination, caching, some more interaction.
.. figure:: {static}/images/2021-11-04_pytest_running.png
:alt: Showing py.test running
Showing py.test running
I know the tool I'm writing is nothing special, especially with their own `cli <https://github.com/cli/cli>`__ now, but I'm not looking at reinventing the wheel!
Check out the code so far on my `GitLab <https://gitlab.com/tyrel/ghub>`__ (*heh*, ironic it's there).
Dependencies: `httpx <https://www.python-httpx.org/>`__, `pip-tools <https://github.com/jazzband/pip-tools>`__, `black <https://github.com/psf/black>`__, `invoke <https://www.pyinvoke.org/>`__, `pytest <https://docs.pytest.org/en/6.2.x/>`__, `pytest-httpx <https://pypi.org/project/pytest-httpx/>`__, `rich <https://github.com/willmcgugan/rich>`__.


@ -0,0 +1,60 @@
Finished my GitHub CLI tool
###########################
:date: 2021-11-05 00:08
:author: tyrel
:category: Python
:slug: finished-my-github-cli-tool
:status: published
I never intended this to be a fully fledged CLI tool comparable to the likes of the real GitHub CLI. This was simply a way to refresh myself and have fun. I have accomplished this, and am now calling this *"Feature Complete"*. You can play around with it yourself from the `repository on gitlab <https://gitlab.com/Tyrel/ghub>`__.
HTTPX.LINKS
-----------
Today I learned some fun things, mainly with ``httpx``. The big thing I focused on today was figuring out pagination. The GitHub API uses the ``link`` header, which I had never seen before.
The format of the header is a url, and then a relationship of that url. Let's take my friend Andrey's repos for example:
``{'link': '<https://api.github.com/user/6292/repos?page=2>; rel="next", <https://api.github.com/user/6292/repos?page=7>; rel="last"', ...}``
*(sample header value from getting Andrey's repositories list and checking pagination)*
The link header there has two items split by a comma, each with two fields split by a semicolon, the first of which is a URL inside angle brackets... and AGHH, this is going to be annoying to parse! Luckily ``httpx`` responses have this handled: ``client.get(...).links`` returns a lovely dictionary of the proper data.
With a ``response.links.get('next')`` check, you can get the url from ``response.links['next']['url']``. So much nicer than writing some regular expressions.
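Put together, the pagination loop came out to something like this (a sketch; the function name is assumed):

.. code:: python

    import httpx

    def get_all_repos(client: httpx.Client, username: str) -> list:
        repos = []
        url = f"/users/{username}/repos"
        while url:
            response = client.get(url)
            response.raise_for_status()
            repos.extend(response.json())
            # httpx parses the link header into a dict for us.
            next_link = response.links.get("next")
            url = next_link["url"] if next_link else None
        return repos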
TESTING
-------
With that accomplished, I then added ``pytest-cov`` to my ``requirements.in`` and was able to leverage some coverage checks. I was at about 30% with the latest changes (much higher than anticipated!) so I knew what I wanted to focus on next. The API seemed the easiest to test first again, so I changed how I loaded my fixtures, making the loader take a name and open that file instead. In real code I would not have the function duplicated in both my test files, I would refactor it, but again, this is just a refresher; I'm lazy.
I decided earlier that I also wanted to catch HTTP 403 errors as I ran into a rate limit issue. Which, *I assure you dear reader*, was a thousand percent intentional so I would know what happens. Yeah, we'll go with that.
Py.Test has a context manager called ``pytest.raises``, and I was able to just ``with pytest.raises(httpx.HTTPStatusError)`` and check that raise really easily.
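Roughly like so, using the ``httpx_mock`` fixture from ``pytest-httpx`` (a sketch; the real test and function names differ):

.. code:: python

    import httpx
    import pytest

    def get_user(username: str) -> dict:
        response = httpx.get(f"https://api.github.com/users/{username}")
        response.raise_for_status()
        return response.json()

    def test_forbidden_raises(httpx_mock):  # fixture provided by pytest-httpx
        httpx_mock.add_response(status_code=403)  # simulate the rate limit
        with pytest.raises(httpx.HTTPStatusError):
            get_user("tyrel")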
The next bits of testing for the API were around the pagination. I faked two responses and needed to update my ``link`` header, checking the cases where there was NO ``link``, where there were multiple pages, and my shortcut return - in case the response was an object, not a list. Pretty straightforward.
The GHub file tests were kind of annoying. I'm leveraging ``rich.table.Table``, so I haven't been able to find a nice "this will make a string for you" approach without just using ``rich``'s ``print`` function. I decided the easiest check was to see if the table's ``columns[...].cells`` matched what I wanted, which felt a little off but it's fine.
The way I generated the table was by making a generator in a pretty ugly way, having a bunch of ``repo['column'], repo['column']`` responses rather than doing a dict comprehension and narrowing the keys down. If I ever come back to this, I MIGHT reassess that with a ``{k: v for k, v in repo.items() if k in SELECTED_KEYS}`` and then yield a dictionary, but it's not worth the effort.
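The cell check boiled down to something like this (a reconstruction, not the repo's exact assertions):

.. code:: python

    from rich.table import Table

    table = Table("name", "stars")
    table.add_row("ghub", "1")

    # Column.cells yields the raw cell values, handy for assertions
    # without rendering the whole table to a string.
    assert list(table.columns[0].cells) == ["ghub"]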
Overall I'd say this project was fun. It gave me a glimpse back into the Python world, and an excuse to write a couple blog posts. My next project is to get a Django site up and running again, so I can figure out how to debug my ``django-dbfilestorage``.
Closing Thoughts
----------------
If I had to do this again, I would probably have tried some test driven development. I've tried it in the past, but I don't work on a lot of greenfield projects. I tend to be the kind of engineer who jumps HEAD FIRST into code, and then tests are an afterthought.
I also kind of want to rewrite this in Go and Rust, two other languages I've been fond of lately, just to see how they'd compare in fun. I haven't done any API calls with Rust yet, only made a little Roguelike by following `Herbert Wolverson's Hands-On-Rust book <https://pragprog.com/titles/hwrust/hands-on-rust/>`__. The `Tidelift CLI <https://tidelift.com/cli>`__ is all Go and a bazillion API calls (okay, like ten), so it wouldn't be too hard to use something like SPF13's Cobra CLI library and make a quick tool that way.
One fun thing I learned while moving things over to GitLab is that my user Tyrel is a super early adopter. I was in the first 36,000 people! I showed a screenshot of my user ID to my friend Sunanda at GitLab and we had fun finding that out.


@ -0,0 +1,37 @@
Postmortem of a fun couple bugs
###############################
:date: 2021-11-11 14:55
:author: tyrel
:category: Go
:slug: postmortem-of-a-fun-couple-bugs
:status: published
Story at my previous job:
Tieg: Hey Tyrel, I can't run ``invoke sign 5555``, can you help with this?
This is how my night started, last night at 10pm. My coworker Tieg did some work on our `CLI <https://tidelift.com/cli>`__ project and was trying to release the latest version. We use `invoke <https://www.pyinvoke.org/>`__ to run our code signing and deployment scripts, so I thought it was just a quick "oh maybe I screwed up some python!" fix. It wasn't.
I spent from 10:30 until 1:30am this morning looking into why Tieg wasn't able to sign the code. The first thing I did was re-run the build on CircleCI, which had the same error, so hey! At least it was reproducible. The problem was that in our Makefile scripts we run ``tidelift version > tidelift-cli.version`` and then upload that to our deployment directories, but this was failing for some reason. We let clients download this file to see what the latest version is, and then our CLI tool has the ability to selfupdate (except on homebrew) to pull the latest version if you're outdated.
Once I knew what was failing, I was able to use CircleCI's ssh commands to log in and see what happened, but I was getting some other errors. I was seeing some problems with ``dbus-launch``, so I promptly (mistakenly) yelled to the void on twitter about ``dubs-launch``. Well, would you know it, I may have mentioned before, but I work with Havoc Pennington.
Havoc Pennington: fortunately I wrote dbus-launch so may be able to tell you something, unfortunately it was like 15 years ago
Pumped about this new revelation, I started looking at our ``keychain`` dependency, because I thought the issue was there, as that's the only thing that uses ``dbus`` on Linux. Then we decided (Havoc pointed it out) that it was a red herring, and maybe the problem was elsewhere. I at least learned a bit about dbus and what it does, but not enough to really talk about it in any detail.
Would you know it, the problem was elsewhere. Tieg was running ``dtruss`` and saw that one time it was checking his ``/etc/hosts`` file when it was failing, and another time it was NOT, which was passing. He then pointed out a 50ms lookup to our ``download.tidelift.com`` host.
Tieg then found https://github.com/golang/go/issues/49517 this issue where someone mentions that Go 1.17.3 was failing them for net/http calls, but not the right way.
It turns out that it wasn't the keyring stuff, and it wasn't *technically* the version calls that failed. What happens is every command starts with a check to https://download.tidelift.com/cli/tidelift-cli.version, which we then compare to the current running version; if it's different and outdated, we then say "you can run selfupdate!". What failed was that call to download.tidelift.com, because of compiling with go1.17.3 and a ``context canceled`` due to stream cleanup, I guess?
Okay, so we needed to downgrade to Go 1.17.2 to fix this. Last night in my trying, I noticed that our CircleCI config was using ``circle/golang:1.16`` as its docker image, which has been superseded by the ``cimg/go:1.16.x`` style of images. But I ran into some problems with that while upgrading to ``cimg/go:1.17.x``. The problem was due to the image having different permissions, so I couldn't write to the same directories that had worked when Mike wrote our ``config.yml`` file.
Tieg and I did a paired zoom chat and finished this up by cutting out all the testing/scanning stuff in our config files and just getting down to the Build and Deploy steps. We found ANOTHER bug: Build seems to run as the ``circleci`` user, but Deploy was running as ``root``. So in the build ``working_directory`` setting, using a ``~/go/tidelift/cli`` path worked. But when we restored the saved cache in Deploy, it still put it in ``/home/circleci/go/tidelift/cli``, while the ``working_directory`` of ``~/go/tidelift/cli`` was relative to ``/root/``. What a nightmare!
With all tildes expanded to ``/home/circleci/go/tidelift/cli``, Makefile hacks undone (removing windows+darwin+arm64 builds from your scripts during testing makes things A LOT faster!), and the PR merged, we were ready to roll.
I merged the PR, we cut a new version of TideliftCLI 1.2.5, updated the changelog, and signed, sealed, and delivered a new version which uses Go 1.17.2 and writes the proper ``tidelift-cli.version`` file in deployment steps. We were ready to ROCK!
That was a fun day. Now it's time to write some rspec tests.


@ -0,0 +1,101 @@
Garage Door Opener
##################
:date: 2022-01-09 22:46
:author: tyrel
:category: HomeAssistant
:slug: garage-door-opener
:status: published
I bought a house on October 9, 2020. This house has a garage door, and like any *normal person* of course I had to automate it.
One of the first things I did when I moved into my house was research some automation. I initially bought a half dozen `ESP8266 devices <https://www.amazon.com/gp/product/B07HF44GBT/ref=ppx_yo_dt_b_search_asin_title?ie=UTF8&psc=1>`__ and tried to figure out what I could do with them. I found Home Assistant and set that up on my server computer, along with ZoneMinder for security cameras.
.. figure:: {static}/images/2022-01-09_nodemcu_esp8266_module.jpg
:alt: NodeMCU ESP8266 module
NodeMCU ESP8266 module
I knew I would need some sort of relay (the domain I purchased it from is gone) and `reed switches <https://www.amazon.com/gp/product/B00LYCUSBY/>`__ to trigger the door and sense its position, so I purchased some from the internet. But my friend Paul said all I needed was a `MOSFET <https://www.amazon.com/gp/product/B071J39678/>`__, so I bought one of those too. I tried to figure out how to trigger the door with a mosfet, but I was never able to. I won't document those failures.
.. figure:: {static}/images/2022-01-09_magnetic_reed_switch.png
:alt: Magnetic Reed Switch
:figclass: wp-image-183
Magnetic Reed Switch
Home Assistant has a plugin called ESPHome where you can write yaml files to configure an esp8266 module. This then builds a binary which you need to flash onto the esp8266 at least once via USB. From then on, you can upload from a web form and drop the bin file in manually, or just press the UPLOAD button from ESPHome. I set my relay up on pin 19/D1 for the digital pin, and 16/GND, 10/3v3 for the power. The reed switch I tossed on 15/D7 and 11/GND, but that could have been anywhere. See schematic below. It still doesn't have an enclosure.
.. figure:: https://thumbnails-photos.amazon.com/v1/thumbnail/ipyyo6AWROe1t6_LDvhs-w?viewBox=1742%2C1306&ownerId=A2XF3XCOUKGXAO
:alt: Relay in blue, and wires going to the NodeMCU
Relay in blue, and wires going to the NodeMCU
.. figure:: {static}/images/2022-01-09_Garage_door_schematic.png
:alt: Schematic
Schematic
With the relay triggering, I still had one problem - I'd trigger the door and it wouldn't do anything! Something else was a problem. The wiring for the garage door terminates in four screws, two of which are the door trigger. I tried poking it with a multimeter and having someone push the door button on the wall, but I was never successful that way, as any contact between the two poles would just open the door anyway.
After some unsuccessful thinking, I figured it was time to purchase an oscilloscope. I've always wanted one; in fact I bought an old `Heathkit <https://i.redd.it/kcovfng8w4u71.jpg>`__ one once, but never got it working, as I would have had to replace all the capacitors, and the insides are all perfboard - A NIGHTMARE.
I found this `USB Logic Analyzer and Oscilloscope <https://www.amazon.com/gp/product/B07PHS4C9B/>`__ on amazon and figured I'd try it out. It came while my father was in town, so he was pretty interested in seeing it work as well. Of course I'm very fond of open source software, so I downloaded `PulseView <https://sigrok.org/wiki/PulseView>`__ and found out that the LHT00SU1 is an ``fx2lafw`` driver device. The interface to connect is really simple: you just look for your driver and Scan for any usb devices that are using that driver, and they show up to choose from.
I plugged the 1ACH and GND cables in and hooked them on the +/- wires where they attach to the door motor. Once you have a device connected, you click Run on the top left, trigger whatever mechanism you're watching (my garage door button), and see what happens.
I was very pleasantly surprised when I saw some movement on the A0 line!
.. figure:: {static}/images/2022-01-09_pulses_180ms_140ms.png
:alt: Pulses of about 180ms and 140ms
:figclass: wp-image-184
Pulses of about 180ms and 140ms
I added some markers to figure out what I needed to imitate in ESPHome, and saw that it's about 150ms high with a 225ms low, then another 150ms high and then low again.
It looks like this in yaml:
.. code:: yaml

    switch:
      - platform: gpio
        pin: D1
        id: relay
      - platform: template
        name: "Garage Remote"
        icon: "mdi:gate"
        turn_on_action:
          - switch.turn_on: relay
          - delay: 150ms
          - switch.turn_off: relay
          - delay: 225ms
          - switch.turn_on: relay
          - delay: 150ms
          - switch.turn_off: relay
I'm pretty sure I jumped and screamed with excitement when it opened!
Once the door was opening and closing, I was able to add more yaml to set up another binary sensor to show whether the door was open or closed (from the reed sensor):
.. code:: yaml

    binary_sensor:
      - platform: gpio
        pin:
          number: D7
          inverted: true
          mode: INPUT_PULLUP
        name: "Garage Door Closed"
Altogether this is shown on my Home Assistant Lovelace dashboard using two cards: one that shows a closed door, and one with an open door (both actual pictures of the door!) with a button to open it. Once it opens or closes, the other card switches into place; Home Assistant, at least at the time, didn't have the good conditional cards I wanted.
.. figure:: {static}/images/2022-01-09_Lovelace_Card_-_garage_door_closed.png
:alt: Closed door state and button
Closed door state and button
Happy with the state of my garage door opening button, I can now yell at my phone to open the garage door (it's a "secure" switch, so it requires the phone to be unlocked before OK Google will trigger the door).
There's a couple more pictures in my `Instagram post <https://www.instagram.com/p/CIrYO3SlxON/>`__ about it, and another `write up on my wiki. <https://tyrel.website/wiki/Garage_Door_ESPHome>`__
I know I could have bought a device to do this for me, but this is fully mine: my code, and my experiment with learning how to automate things at home. I gained way more out of this project than I would have if I had just bought a MyQ or whatever is popular these days.


@ -0,0 +1,13 @@
An Update On Flying
###################
:date: 2022-05-07 01:15
:author: tyrel
:category: Personal
:slug: an-update-on-flying
:status: published
It took me ten years to get my pilot's license, from March 17, 2010 to December 30, 2020. It was amazing. I now find myself a year and a half on from achieving my goal, and I don't find myself interested enough right now to go flying. There's a gas crisis, there's a pandemic, there's a lot of political things going on, and a war in Ukraine, so it kind of feels bad wasting hundreds of dollars just going sightseeing.
I just completed a ground school course for my Instrument Rating -- I still need to take the written test part. With that out of the way I can start actually flying with an instructor to get my instrument rating. One of the planes the club I am a part of has is a `Mooney M20J <http://wingsofcarolina.org/aircraft>`__. Flying it requires 250 hours of flight time, or 100 hours and an instrument rating. I'm at that annoying 145 hour mark that dissuades me from wasting 100 more hours just to fly that plane, and pushes me toward getting my instrument rating instead.
I left my previous job last December, so I didn't have the excess money to fly for a month while job hunting, and well, habit becomes habit… I haven't flown since `October <https://k3tas.radio/airband/2021/10/18/october-17-2021-back-above-keene/>`__! I'm definitely in a better place now, with a much nicer job and salary though. I'm hoping to maybe pick it back up again this fall. I wasn't pleased AT ALL (those who follow me on twitter will probably know this; online class is not the environment for me) with the ground school over Zoom, so I want to redo this by watching the Sporty's ground school. I need to put aside some time over the coming weeks to actually sit down and watch it. Hopefully I can start flying with an instructor soon. I'm not looking forward to taking the written test, as I have to go visit a testing center at RDU airport - so that is kind of delaying me too.


@ -0,0 +1,107 @@
Writing an EPUB parser. Part 1
##############################
:date: 2022-06-01 01:41
:author: tyrel
:category: Python
:tags: epub, python
:slug: writing-an-epub-parser-part-1
:status: published
Parsing Epubs
-------------
Recently I've become frustrated with the experience of reading books on my Kindle Paperwhite. The swipe features really bother me. I really like MoonReader on Android, but reading on my phone isn't always pleasing. This led me to look into other hardware. I've been eyeing the BOOX company for a while now, and am definitely considering some of their new offerings some time. Until I can afford to splurge on a new ebook reader, I've decided to start a new project: making my own ebook reader tools!
I'm starting with EPUBs, as this is one of the easiest formats to work with. At its core, an EPUB is a zip file with the ``.epub`` extension instead of ``.zip``, with many individual XHTML file chapters inside it. You can read more about how they're structured yourself over at `FILEFORMAT <https://docs.fileformat.com/ebook/epub/>`__.
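You can see that for yourself before reaching for any library; a quick sketch (any ``.epub`` will do):

.. code:: python

    import zipfile

    # An EPUB is just a zip archive; list the XHTML chapters inside it.
    with zipfile.ZipFile("TOG.epub") as archive:
        for name in archive.namelist():
            if name.endswith((".xhtml", ".html")):
                print(name)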
The tool I've chosen for reading EPUBs is the Python library `ebooklib <https://pypi.org/project/EbookLib/>`__. This seemed to be a nice lightweight library for reading EPUBs. I also used `DearPyGUI <https://pypi.org/project/dearpygui/>`__ for showing this to the screen, because I figured why not, I like GUI libraries.
My first task was to find an EPUB file, so I downloaded one from my calibre server. I convert all my ebook files to ``.epub`` and ``.mobi`` there so I can access them anywhere I can read my OPDS feed. I chose Throne of Glass (abbreviated to ``TOG.epub`` for the rest of this post). To load it, I launched Python and ran:
.. code:: console

   >>> from ebooklib import epub
   >>> print(book := epub.read_epub("TOG.epub"))
This returned a ``<ebooklib.epub.EpubBook object...>``. Seeing I had an ``EpubBook``, I ran ``dir(book)`` and found the properties available to me:
.. code:: python

   ['add_author', 'add_item', 'add_metadata', 'add_prefix',
    'bindings', 'direction',
    'get_item_with_href', 'get_item_with_id', 'get_items',
    'get_items_of_media_type', 'get_items_of_type', 'get_metadata',
    'get_template', 'guide', 'items', 'language', 'metadata',
    'namespaces', 'pages', 'prefixes', 'reset',
    'set_cover', 'set_direction', 'set_identifier', 'set_language',
    'set_template', 'set_title', 'set_unique_metadata', 'spine',
    'templates', 'title', 'toc', 'uid', 'version']
Of note, the ``get_item_with_X`` entries caught my eye, as did ``spine``. For my file, ``book.spine`` gave me a bunch of tuples of an ID and a ``"yes"`` string, and I had no idea what the ``"yes"`` meant. I then noticed the ``toc`` property; assuming that was a Table of Contents, I printed it out and saw a bunch of ``epub.Link`` objects. That looked like something I could use.
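For illustration, that exploration looked something like this (the IDs here are invented; yours will vary by book). I believe the ``"yes"`` is the ``linear`` attribute from the spine entries in the OPF package file:

.. code:: console

   >>> book.spine[:3]
   [('cover', 'yes'), ('titlepage', 'yes'), ('chapter001', 'yes')]
   >>> book.toc[:2]
   [<ebooklib.epub.Link object at 0x...>, <ebooklib.epub.Link object at 0x...>]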
I will note, at this point I was thinking that this wasn't the direction I wanted to take the project. I really wanted to learn how to parse these things myself: unzip, parse the XML or HTML, and so on. But I realized I needed to see someone else's work to even know what was going on. With this "defeat for the evening" admitted, I figured, hey, why not at least make SOMETHING? I decided to carry on.
Seeing I was on at least some track, I opened up PyCharm and made a new project. First I set up a class called ``Epub`` with a couple of functions for setting things up, and ended up with:
.. code:: python

   from typing import List

   import ebooklib.epub
   from ebooklib import epub

   class Epub:
       def __init__(self, book_path: str) -> None:
           self.contents: ebooklib.epub.EpubBook = epub.read_epub(book_path)
           self.title: str = self.contents.title
           self.toc: List[ebooklib.epub.Link] = self.contents.toc
           self.chapters: List[dict] = []  # filled in by parse_chapters()
I then set up a ``parse_chapters`` function, where I loop through the TOC. Looking at the definition of ``Link``, I saw I could get an ``href`` and a ``title``, so I decided my object for chapters would be a dictionary (I'll move to a dataclass later) with ``title`` and ``content``. I remembered from earlier that I had ``get_item_with_href``, so I stored the item's content looked up from the TOC's href: ``self.contents.get_item_with_href(link.href).get_content()``. This would later prove to be a bad decision when I opened "The Fold.epub" and realized that a TOC can contain tuples of ``Section`` and ``Link``, not just ``Links``. I ended up storing the item itself, and doing a double loop in ``parse_chapters`` to handle the tuple case.
.. code:: python

   def parse_chapters(self) -> None:
       idx = 0
       for _item in self.toc:
           # A section arrives as a tuple: (Section, [Link, ...])
           if isinstance(_item, tuple):
               for link in _item[1]:
                   self._parse_link(idx, link)
                   idx += 1
           else:
               self._parse_link(idx, _item)
               idx += 1
``_parse_link`` simply builds the dictionary of ``title`` and ``item`` I mentioned earlier, plus a new ``index``, since I had introduced buttons in DearPyGUI by this point as well.
.. code:: python

   def _parse_link(self, idx, link) -> None:
       title = link.title
       self.chapters.append(dict(
           index=idx,
           title=title,
           item=self.contents.get_item_with_href(link.href)
       ))
That's really all there is to an MVP of an EPUB parser. You can use ``BeautifulSoup`` to parse the HTML from the items' ``get_body_content()`` calls and produce more readable text if you want, but depending on your front end, the raw HTML may be exactly what you want.
In my implementation, the ``Epub`` class keeps track of the currently selected chapter; ``load_view`` looks that chapter up and sets the ``current_text`` variable.
.. code:: python

   from bs4 import BeautifulSoup

   def load_view(self) -> None:
       item = self.chapters[self.current_index]['item']
       soup = BeautifulSoup(item.get_body_content(), "html.parser")
       text = [para.get_text() for para in soup.find_all("p")]
       self.current_text = "\n".join(text)
I don't believe any of this code will be useful to anyone outside of my research for now, but it's my first step into writing an EPUB parser myself.
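Putting the pieces together, usage of the class looks roughly like this (a sketch under my assumptions: ``parse_chapters`` has been called and ``current_index`` just points at the first chapter):

.. code:: python

   book = Epub("TOG.epub")
   book.parse_chapters()
   book.current_index = 0  # select the first chapter
   book.load_view()
   print(book.title)
   print(book.current_text[:200])  # preview the chapter text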
The DearPyGUI steps are out of scope for this blog post, but here is my `final ebook reader <https://gist.github.com/tyrelsouza/9c6681850fc00bf5d9f35568faf611d4>`__, which is super inefficient!
.. figure:: {static}/images/2022-06-01_ebook_reader.png
   :alt: final ebook reader, chapters on left, text on right

I figure the dedication page is not *as* copyrighted as the rest of the book, so it's fair game showing that much. Sarah J. Maas, if you have any issues, I can find another book for my screenshots.

View File

@ -0,0 +1,43 @@
2016 Monitoring a CO2 Tank in a Lab with a Raspberry Pi
#######################################################
:date: 2022-06-02 16:54
:author: tyrel
:category: Linux
:slug: 2016-monitoring-a-co2-tank-in-a-lab-with-a-raspberry-pi
:status: published
This was written in 2017, but I recently found a copy again and wanted to repost it.
The Story
---------
For a few months last year, I lived around the block from work. I would sometimes stop in on the weekends and pick up stuff I had forgotten at my desk. One day the power went out at my apartment, so I figured I would check in at work and see if there were any problems. I messaged our Lab Safety Manager on Slack to say, "Hey, the power went out and I am at the office. Is there anything you'd like me to check?" He said he hadn't even gotten the alarm emails/pages yet, and asked me to check the lab and send him a picture of the CO2 tanks, to make sure the power outage hadn't compromised them. Once I had procured access to the BL2 lab on my building badge, I made my way out back and took a lovely picture of the tanks; everything was fine.
The following week, in my one-on-one meeting with my manager, I mentioned what had happened and she and I discussed the event. It clearly isn't sustainable to send someone in after every power outage if we don't need to, but the lab equipment doesn't have any monitoring ports.
Operation Lab Cam was born. I put together a prototype from a Raspberry Pi 3 with a camera module and played around with ways to monitor the display on the tanks. After a few months of not touching the project, I dug into it again on a downtime day. The result: we now have an automated camera box that takes a picture once a minute and displays it on an auto-refreshing web page. There are many professional products out there that do exactly this, but I wanted something with the ability to be upgraded in the future.
Summary of the Technical Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the entire process is managed by one bash script, which is a little clunky, but livable. The script goes a little like this:
#. Take a picture to a temporary location.
#. Add a graphical time stamp.
#. Copy that image both to the currently served image and to a timestamped backup file.
The web page that serves the image is just a simple page that shows the image and refreshes every thirty seconds.
The Gritty Technical Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The program I'm using to take pictures is **raspistill**. If I had written my script to call **raspistill** every time I wanted a picture taken, saving the images could have taken a lot longer, because the camera has to meter the scene for every shot, and that adds up. The solution is signal mode, which turns **raspistill** into a daemon: once enabled, any time you send a SIGUSR1 to the backgrounded process, it takes a picture.
Instead of setting up a service with *systemd*, I have a small bash script. At the beginning, it runs **ps aux** and checks whether **raspistill** is running; if it's not, it starts a new instance of **raspistill** with the appropriate options and backgrounds it. The next time the script runs, it detects that **raspistill** is already running and is almost a no-op.
After this, I send a SIGUSR1 (``kill -10``) to take the picture, which is saved un-timestamped. Next I call ImageMagick's **convert** on the image and crop out the center, because all I care about is a 500x700 pixel region (which is why I couldn't use **raspistill**'s ``-a 12`` timestamp option).
The result is then copied to the image served by the web page, and also backed up in a directory that nginx serves.
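The real thing is a bash script, but the flow is easy to sketch. Here is a rough Python rendition of the same logic; the paths, options, and process-management details are illustrative, not the actual script:

.. code:: python

   import datetime
   import shutil
   import subprocess
   import time

   def ensure_raspistill_daemon() -> None:
       # pgrep exits nonzero when no raspistill process exists.
       if subprocess.run(["pgrep", "raspistill"], capture_output=True).returncode != 0:
           # -s: signal mode (capture on SIGUSR1), -t 0: run forever,
           # -o: file each capture is written to.
           subprocess.Popen(["raspistill", "-s", "-t", "0", "-o", "/tmp/capture.jpg"])

   def take_and_publish_picture() -> None:
       ensure_raspistill_daemon()
       # SIGUSR1 tells the backgrounded raspistill to take the picture.
       subprocess.run(["pkill", "-USR1", "raspistill"])
       time.sleep(2)  # give the camera a moment to write the file
       # Crop the 500x700 region of interest with ImageMagick.
       subprocess.run(["convert", "/tmp/capture.jpg",
                       "-gravity", "center", "-crop", "500x700+0+0",
                       "/tmp/cropped.jpg"])
       stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
       shutil.copy("/tmp/cropped.jpg", "/var/www/html/current.jpg")
       shutil.copy("/tmp/cropped.jpg", f"/var/www/html/backups/{stamp}.jpg")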
.. figure:: {static}/images/2022-06-02_bl2cam.jpg
   :alt: LEDs on a CO2 tank

View File

@ -0,0 +1,43 @@
Scrollbar Colors
################
:date: 2022-10-13 12:07
:author: tyrel
:category: Website
:slug: scrollbar-colors
:status: published
I was talking to someone about CSS nostalgia and "back in my day" moments when scrollbar colors came up.
.. code:: css

   /* For Chromium-based browsers */
   ::-webkit-scrollbar {
       background: #2A365F;
   }
   ::-webkit-scrollbar-thumb {
       background: #514763;
   }

   /* For Firefox */
   html {
       scrollbar-color: #514763 #2A365F;
   }
Firefox and Chrome use different selectors, so to support the majority of browsers you need both.
.. figure:: {static}/images/2022-10-13-chrome_scrollbar.png
   :alt: Chrome with a blue/purple scrollbar
   :width: 940px
   :height: 688px

   Chrome with a blue/purple scrollbar

.. figure:: {static}/images/2022-10-13_safari_scrollbar.png
   :alt: Safari with a blue/purple scrollbar

   Safari with a blue/purple scrollbar

.. figure:: {static}/images/2022-10-13_firefox_scrollbar.png
   :alt: Firefox with a blue/purple scrollbar

   Firefox with a blue/purple scrollbar

BIN
content/images/.DS_Store vendored Normal file
(Binary image files added under content/images/; contents not shown.)
14
content/pages/about.rst Normal file
View File

@ -0,0 +1,14 @@
About
######
:date: 2022-05-01 00:00
:author: tyrel
:category: About
:slug: about
:status: published
Senior Software Engineer with a back-end focus, specializing in Python and Go.
I'm a very infrequent blogger; I'll go months without any posts, sometimes years.

21
content/pages/resume.rst Normal file
View File

@ -0,0 +1,21 @@
Resume
######
:date: 2022-05-05 13:06
:author: tyrel
:category: Resume
:slug: resume
:status: published
See https://cv.tyrel.dev/ for a more up-to-date resume.
.. container:: wp-block-file

   .. raw:: html

      <object class="wp-block-file__embed" data="https://tyrel.dev/random/wp-content/uploads/2022/05/Tyrel-Souza-Resume-2022.pdf" type="application/pdf" style="width:100%;height:1190px" aria-label="Embed of Tyrel-Souza-Resume-2022.">

   .. raw:: html

      </object>

   `Tyrel-Souza-Resume-2022 <https://tyrel.dev/random/wp-content/uploads/2022/05/Tyrel-Souza-Resume-2022.pdf>`__

39
pelicanconf.py Normal file
View File

@ -0,0 +1,39 @@
AUTHOR = 'Tyrel Souza'
SITENAME = 'Tyrel Site'
TWITTER_USERNAME = "tyrelsouza"
SITEURL = ''
SITESUBTITLE = 'Code, Home Automation, Personal Blog'
MENUITEMS = [
("Blog", "/")
]
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (
('Code', 'https://gitea.tyrel.dev/'),
)
# Social widget
SOCIAL = (
('Twitter', 'https://twitter.com/tyrelsouza'),
('Instagram', 'https://instagram.com/tyrelsouza'),
)
PLUGINS = [ ]
DEFAULT_PAGINATION = 10
DISPLAY_CATEGORIES_ON_MENU = False
DELETE_OUTPUT_DIRECTORY = True
STATIC_PATHS = ("images",)

21
publishconf.py Normal file
View File

@ -0,0 +1,21 @@
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = ''
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""

143
tasks.py Normal file
View File

@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
import os
import shlex
import shutil
import sys
import datetime
from invoke import task
from invoke.main import program
from invoke.util import cd
from pelican import main as pelican_main
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
from pelican.settings import DEFAULT_CONFIG, get_settings_from_file
OPEN_BROWSER_ON_SERVE = True
SETTINGS_FILE_BASE = 'pelicanconf.py'
SETTINGS = {}
SETTINGS.update(DEFAULT_CONFIG)
LOCAL_SETTINGS = get_settings_from_file(SETTINGS_FILE_BASE)
SETTINGS.update(LOCAL_SETTINGS)
CONFIG = {
'settings_base': SETTINGS_FILE_BASE,
'settings_publish': 'publishconf.py',
# Output path. Can be absolute or relative to tasks.py. Default: 'output'
'deploy_path': SETTINGS['OUTPUT_PATH'],
# Remote server configuration
'ssh_user': 'tyrel',
'ssh_host': 'localhost',
'ssh_port': '22',
'ssh_path': '/Users/tyrel/pelican-site',
# Host and port for `serve`
'host': 'localhost',
'port': 8000,
}
@task
def clean(c):
"""Remove generated files"""
if os.path.isdir(CONFIG['deploy_path']):
shutil.rmtree(CONFIG['deploy_path'])
os.makedirs(CONFIG['deploy_path'])
@task
def build(c):
"""Build local version of site"""
pelican_run('-s {settings_base}'.format(**CONFIG))
@task
def rebuild(c):
"""`build` with the delete switch"""
pelican_run('-d -s {settings_base}'.format(**CONFIG))
@task
def regenerate(c):
"""Automatically regenerate site upon file modification"""
pelican_run('-r -s {settings_base}'.format(**CONFIG))
@task
def serve(c):
"""Serve site at http://$HOST:$PORT/ (default is localhost:8000)"""
class AddressReuseTCPServer(RootedHTTPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(
CONFIG['deploy_path'],
(CONFIG['host'], CONFIG['port']),
ComplexHTTPRequestHandler)
if OPEN_BROWSER_ON_SERVE:
# Open site in default browser
import webbrowser
webbrowser.open("http://{host}:{port}".format(**CONFIG))
sys.stderr.write('Serving at {host}:{port} ...\n'.format(**CONFIG))
server.serve_forever()
@task
def reserve(c):
"""`build`, then `serve`"""
build(c)
serve(c)
@task
def preview(c):
"""Build production version of site"""
pelican_run('-s {settings_publish}'.format(**CONFIG))
@task
def livereload(c):
"""Automatically reload browser tab upon file modification."""
from livereload import Server
def cached_build():
cmd = '-s {settings_base} -e CACHE_CONTENT=true LOAD_CONTENT_CACHE=true'
pelican_run(cmd.format(**CONFIG))
cached_build()
server = Server()
theme_path = SETTINGS['THEME']
watched_globs = [
CONFIG['settings_base'],
'{}/templates/**/*.html'.format(theme_path),
]
content_file_extensions = ['.md', '.rst']
for extension in content_file_extensions:
content_glob = '{0}/**/*{1}'.format(SETTINGS['PATH'], extension)
watched_globs.append(content_glob)
static_file_extensions = ['.css', '.js']
for extension in static_file_extensions:
static_file_glob = '{0}/static/**/*{1}'.format(theme_path, extension)
watched_globs.append(static_file_glob)
for glob in watched_globs:
server.watch(glob, cached_build)
if OPEN_BROWSER_ON_SERVE:
# Open site in default browser
import webbrowser
webbrowser.open("http://{host}:{port}".format(**CONFIG))
server.serve(host=CONFIG['host'], port=CONFIG['port'], root=CONFIG['deploy_path'])
@task
def publish(c):
"""Publish to production via rsync"""
pelican_run('-s {settings_publish}'.format(**CONFIG))
c.run(
'rsync --delete --exclude ".DS_Store" -pthrvz -c '
'-e "ssh -p {ssh_port}" '
'{} {ssh_user}@{ssh_host}:{ssh_path}'.format(
CONFIG['deploy_path'].rstrip('/') + '/',
**CONFIG))
def pelican_run(cmd):
cmd += ' ' + program.core.remainder  # allow passing extra args through to pelican
pelican_main(shlex.split(cmd))