Skip to content

Instantly share code, notes, and snippets.

@cratermoon
Created March 5, 2026 17:28
Show Gist options
  • Select an option

  • Save cratermoon/82909369591df5a2acead565440f45b3 to your computer and use it in GitHub Desktop.

Select an option

Save cratermoon/82909369591df5a2acead565440f45b3 to your computer and use it in GitHub Desktop.
@online{AIBlindspots,
title = {{AI} Blindspots},
author = {Yang, Edward Z.},
date = {2025-03-13},
url = {https://ezyang.github.io/ai-blindspots/},
urldate = {2025-03-23},
abstract = {Blindspots in LLMs I’ve noticed while AI coding. Sonnet family emphasis. Maybe I will eventually suggest Cursor rules for these problems.},
langid = {american},
organization = {AI Blindspots},
file = {/Users/sen/Zotero/storage/BB7AWJGQ/ai-blindspots.html}
}
@online{AINewAesthetics,
title = {{AI}: The New Aesthetics of Fascism},
shorttitle = {{AI}},
author = {Watkins, Gareth},
date = {2025-02-09},
url = {https://newsocialist.org.uk/transmissions/ai-the-new-aesthetics-of-fascism/},
urldate = {2025-05-11},
abstract = {It's embarrassing, destructive, and looks like shit: AI-generated art is the perfect aesthetic form for the far right.},
langid = {english},
organization = {New Socialist},
file = {/Users/sen/Zotero/storage/M3NJUDY8/ai-the-new-aesthetics-of-fascism.html}
}
@online{AIsOstensibleEmergent2023,
title = {{AI}’s Ostensible Emergent Abilities Are a Mirage},
date = {2023-05-08},
url = {https://hai.stanford.edu/news/ais-ostensible-emergent-abilities-are-mirage},
urldate = {2025-02-06},
abstract = {According to Stanford researchers, large language models are not greater than the sum of their parts.},
langid = {english},
organization = {Stanford {HAI}},
file = {/Users/sen/Zotero/storage/TTWFJI4Y/ais-ostensible-emergent-abilities-are-mirage.html}
}
@online{alonsoRecurringCycleDeveloper2025,
title = {The Recurring Cycle of 'Developer Replacement' Hype},
author = {Alonso, Danilo},
date = {2025-05-19T06:04:05},
url = {https://alonso.network/the-recurring-cycle-of-developer-replacement-hype/},
urldate = {2025-05-27},
abstract = {AI isn't replacing developers, it's transforming them. Just as NoCode created specialists and cloud turned sysadmins into DevOps engineers, AI elevates engineers from code writers to system architects. The most valuable skill isn't writing code, it's designing coherent systems.},
langid = {english},
organization = {Alonso Network},
file = {/Users/sen/Zotero/storage/CKYR4VDU/the-recurring-cycle-of-developer-replacement-hype.html}
}
@online{AntiquaNovaNote2025,
title = {Antiqua et Nova. Note on the Relationship Between Artificial Intelligence and Human Intelligence (28 {January} 2025)},
author = {{Dicastery for the Doctrine of the Faith}},
date = {2025-01-28},
url = {https://www.vatican.va/roman_curia/congregations/cfaith/documents/rc_ddf_doc_20250128_antiqua-et-nova_en.html},
urldate = {2025-02-17},
file = {/Users/sen/Zotero/storage/YX3EZPF9/rc_ddf_doc_20250128_antiqua-et-nova_en.html}
}
@online{arkoudasGPT4CanReason2023,
title = {{GPT-4} Can't Reason},
author = {Arkoudas, Konstantine},
date = {2023-08-02},
number = {2023080148},
doi = {10.20944/preprints202308.0148.v1},
url = {https://www.preprints.org/manuscript/202308.0148/v1},
urldate = {2023-08-08},
abstract = {GPT-4 was released in March 2023 to wide acclaim, marking a very substantial improvement across the board over GPT-3.5 (OpenAI's previously best model, which had powered the initial release of ChatGPT). Despite the genuinely impressive improvement, however, there are good reasons to be highly skeptical of GPT-4's ability to reason. This position paper discusses the nature of reasoning; criticizes the current formulation of reasoning problems in the NLP community and the way in which the reasoning performance of LLMs is currently evaluated; introduces a collection of 21 diverse reasoning problems; and performs a detailed qualitative analysis of GPT-4's performance on these problems. Based on the results of this analysis, the paper argues that, despite the occasional flashes of analytical brilliance, GPT-4 at present is utterly incapable of reasoning.},
langid = {english},
pubstate = {prepublished},
keywords = {AI,artificial intelligence (a.i.),cognitive processes,GenAI,GPT-4,inference,LLM,programming,reasoning},
file = {/Users/sen/Zotero/storage/FSV9JI78/Arkoudas - 2023 - GPT-4 Can't Reason.pdf}
}
@online{bachThereSomethingYour2025,
title = {Is There Something in Your "{I}"?},
author = {Bach, James},
date = {2025-05-04},
url = {https://www.satisfice.com/blog/archives/487881},
urldate = {2025-05-21},
abstract = {If you present someone else's work as if it were your own, no one will respect you, and you won't even respect yourself. If someone is paying you to do though},
langid = {british},
organization = {Satisfice, Inc.},
file = {/Users/sen/Zotero/storage/5VZXTNBJ/487881.html}
}
@inproceedings{benderDangersStochasticParrots2021,
title = {On the Dangers of Stochastic Parrots: Can Language Models Be Too Big? 🦜},
shorttitle = {On the Dangers of Stochastic Parrots},
booktitle = {Proceedings of the 2021 {ACM} Conference on Fairness, Accountability, and Transparency},
author = {Bender, Emily M. and Gebru, Timnit and McMillan-Major, Angelina and Shmitchell, Shmargaret},
date = {2021-03-01},
series = {{FAccT} '21},
pages = {610--623},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/3442188.3445922},
url = {https://dl.acm.org/doi/10.1145/3442188.3445922},
urldate = {2023-08-25},
abstract = {The past 3 years of work in NLP have been characterized by the development and deployment of ever larger language models, especially for English. BERT, its variants, GPT-2/3, and others, most recently Switch-C, have pushed the boundaries of the possible both through architectural innovations and through sheer size. Using these pretrained models and the methodology of fine-tuning them for specific tasks, researchers have extended the state of the art on a wide array of tasks as measured by leaderboards on specific benchmarks for English. In this paper, we take a step back and ask: How big is too big? What are the possible risks associated with this technology and what paths are available for mitigating those risks? We provide recommendations including weighing the environmental and financial costs first, investing resources into curating and carefully documenting datasets rather than ingesting everything on the web, carrying out pre-development exercises evaluating how the planned approach fits into research and development goals and supports stakeholder values, and encouraging research directions beyond ever larger language models.},
isbn = {978-1-4503-8309-7},
keywords = {programming},
file = {/Users/sen/Zotero/storage/GM6UBHCJ/Bender et al. - 2021 - On the Dangers of Stochastic Parrots Can Language.pdf}
}
@online{bergstromModernDayOracles2025,
title = {Modern Day Oracles or Bullshit Machines?},
author = {Bergstrom, Carl T. and West, Jevin D.},
date = {2025},
url = {https://thebullshitmachines.com/},
urldate = {2025-02-09},
langid = {english},
file = {/Users/sen/Zotero/storage/R4774SNZ/thebullshitmachines.com.html}
}
@online{biskExperienceGroundsLanguage2020,
title = {Experience Grounds Language},
author = {Bisk, Yonatan and Holtzman, Ari and Thomason, Jesse and Andreas, Jacob and Bengio, Yoshua and Chai, Joyce and Lapata, Mirella and Lazaridou, Angeliki and May, Jonathan and Nisnevich, Aleksandr and Pinto, Nicolas and Turian, Joseph},
date = {2020-11-02},
eprint = {2004.10151},
eprinttype = {arXiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2004.10151},
url = {http://arxiv.org/abs/2004.10151},
urldate = {2024-12-28},
abstract = {Language understanding research is held back by a failure to relate language to the physical world it describes and to the social interactions it facilitates. Despite the incredible effectiveness of language processing models to tackle tasks after being trained on text alone, successful linguistic communication relies on a shared experience of the world. It is this shared experience that makes utterances meaningful. Natural language processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large, text-only corpora requires the parallel tradition of research on the broader physical and social context of language to address the deeper questions of communication.},
pubstate = {prepublished},
keywords = {Artificial intelligence (AI),Computer Science - Artificial Intelligence,Computer Science - Computation and Language,Computer Science - Machine Learning},
note = {Comment: Empirical Methods in Natural Language Processing (EMNLP), 2020},
file = {/Users/sen/Zotero/storage/3A7C64G3/Bisk et al. - 2020 - Experience Grounds Language.pdf;/Users/sen/Zotero/storage/SZY8ED59/2004.html}
}
@online{bjarnasonAIDickMove2026,
title = {'{AI}' Is a Dick Move, Redux},
author = {Bjarnason, Baldur},
date = {2026-01-20},
url = {https://www.baldurbjarnason.com/notes/2026/note-on-debating-llm-fans/},
urldate = {2026-02-07},
abstract = {Writing at the end of the world, from Hveragerði, Iceland},
langid = {english},
file = {/Users/sen/Zotero/storage/AZ4JSD62/note-on-debating-llm-fans.html}
}
@online{bjarnasonLLMentalistEffectHow2023,
title = {The {LLMentalist} Effect: How Chat-Based Large Language Models Replicate the Mechanisms of a Psychic's Con},
shorttitle = {The {LLMentalist} Effect},
author = {Bjarnason, Baldur},
date = {2023-07-04},
url = {https://softwarecrisis.dev/letters/llmentalist/},
urldate = {2025-06-09},
abstract = {The new era of tech seems to be built on superstitious behaviour},
langid = {english},
organization = {Out of the Software Crisis},
file = {/Users/sen/Zotero/storage/LDQ5R4H7/llmentalist.html}
}
@online{bondWhatHappenedYear,
title = {What Happened the Year {I} Banned {AI}},
author = {Bond, Chanea},
date = {2025-07-29},
url = {https://www.edutopia.org/article/banning-ai-tools-class/},
urldate = {2025-08-30},
abstract = {The choice to keep artificial intelligence tools out of my class has been the most impactful decision that I have made as a teacher.},
langid = {english},
organization = {Edutopia},
keywords = {genai},
file = {/Users/sen/Zotero/storage/5LTJUE4X/banning-ai-tools-class.html}
}
@online{bostoniensisDearEverybodyRe2023,
type = {Mastodon post},
title = {Dear Everybody Re {ChatGPT} Etc, the Word You Need That You Don't Know You Need Is {CONFABULATION}},
author = {Bostoniensis, Sibylla},
date = {2023-02-17},
url = {https://universeodon.com/@siderea/109883198218504351},
urldate = {2023-05-30},
abstract = {Dear everybody re ChatGPT etc,The word you need that you don't know you need is CONFABULATION.What y'all are calling "hallucination" is, in neurology and psychology (where it means two slightly different things) called "confabulation".It means when somebody's just making up something and has no idea that they're making things up, because their brain/mind is glitching.A lot of folks are both trying to understand the AI chatbots and are trying to grapple with the possible implications for how organic minds work, by speculating about human cognition. Y'all should definitely check into the history of actual research into this topic, it will make your sock roll up and down, and blow your minds. And one of the key areas will be surfaced with that keyword. There have been a bunch of very clever experiments that have been done on humans and how they explain themselves which betrays that there are parts of the mind that are surprisingly - and even alarmingly - independent.Frex...},
langid = {english},
organization = {Mastodon},
note = {Posted as @siderea@universeodon.com},
keywords = {artificial intelligence (a.i.),confabulation,GenAI,programming},
file = {/Users/sen/Zotero/storage/2QUCJ7D7/109883198218504351.html}
}
@online{brayAIAngst2025,
title = {{AI} Angst},
author = {Bray, Tim},
date = {2025-06-06},
url = {https://www.tbray.org/ongoing/When/202x/2025/06/06/My-AI-Angst},
urldate = {2025-06-09},
langid = {english},
organization = {ongoing by Tim Bray},
file = {/Users/sen/Zotero/storage/4WG35XJG/My-AI-Angst.html}
}
@online{brayRealGenAIIssue2025,
title = {The Real {GenAI} Issue},
author = {Bray, Tim},
date = {2025-07-06},
url = {https://www.tbray.org/ongoing/When/202x/2025/07/06/AI-Manifesto},
urldate = {2025-07-06},
langid = {english},
organization = {ongoing by Tim Bray},
file = {/Users/sen/Zotero/storage/W8DTEQKE/AI-Manifesto.html}
}
@online{brownToolmen2025,
title = {Toolmen},
author = {Brown, Mandy},
date = {2025-05-30},
url = {https://aworkinglibrary.com/writing/toolmen},
urldate = {2025-05-31},
abstract = {Even the best weapon is an unhappy tool.},
langid = {english},
organization = {A Working Library},
file = {/Users/sen/Zotero/storage/RJ9E4ESW/toolmen.html}
}
@online{BuildRetrievalAugmented,
title = {Build a Retrieval Augmented Generation ({RAG}) App},
url = {https://python.langchain.com/docs/tutorials/rag/},
urldate = {2024-10-24},
abstract = {One of the most powerful applications enabled by LLMs is sophisticated question-answering (Q\&A) chatbots. These are applications that can answer questions about specific source information. These applications use a technique known as Retrieval Augmented Generation, or RAG.},
langid = {english},
organization = {{LangChain}},
file = {/Users/sen/Zotero/storage/HN8FR2AK/rag.html}
}
@inproceedings{buolamwiniGenderShadesIntersectional2018,
title = {Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification},
shorttitle = {Gender Shades},
booktitle = {Proceedings of the 1st Conference on Fairness, Accountability and Transparency},
author = {Buolamwini, Joy and Gebru, Timnit},
date = {2018-01-21},
pages = {77--91},
publisher = {PMLR},
issn = {2640-3498},
url = {https://proceedings.mlr.press/v81/buolamwini18a.html},
urldate = {2021-11-05},
abstract = {Recent studies demonstrate that machine learning algorithms can discriminate based on classes like race and gender. In this work, we present an approach to evaluate bias present in automated facial analysis algorithms and datasets with respect to phenotypic subgroups. Using the dermatologist approved Fitzpatrick Skin Type classification system, we characterize the gender and skin type distribution of two facial analysis benchmarks, IJB-A and Adience. We find that these datasets are overwhelmingly composed of lighter-skinned subjects (79.6\% for IJB-A and 86.2\% for Adience) and introduce a new facial analysis dataset which is balanced by gender and skin type. We evaluate 3 commercial gender classification systems using our dataset and show that darker-skinned females are the most misclassified group (with error rates of up to 34.7\%). The maximum error rate for lighter-skinned males is 0.8\%. The substantial disparities in the accuracy of classifying darker females, lighter females, darker males, and lighter males in gender classification systems require urgent attention if commercial companies are to build genuinely fair, transparent and accountable facial analysis algorithms.},
eventtitle = {Conference on Fairness, Accountability and Transparency},
langid = {english},
keywords = {facial recognition,photography,programming},
file = {/Users/sen/Zotero/storage/TII9WJQ3/Buolamwini and Gebru - 2018 - Gender Shades Intersectional Accuracy Disparities.pdf;/Users/sen/Zotero/storage/XZWKDPS2/Buolamwini and Gebru - 2018 - Gender Shades Intersectional Accuracy Disparities.pdf}
}
@online{buttfield-addisonEmptyPromiseAIGenerated2025,
title = {The Empty Promise of {AI-Generated} Creativity},
author = {Buttfield-Addison, Paris},
date = {2025-03-03T00:00:00+00:00},
url = {https://hey.paris/posts/genai/},
urldate = {2025-03-03},
abstract = {“Are you going to add AI writing to Yarn Spinner?” It’s a question I hear almost weekly these days. Whether at game development conferences, online, or during meetings, there’s an assumption that every tool is racing to implement some form of generative AI. I understand the curiosity—we’re living through an unprecedented wave of AI hype, and there’s genuine confusion about where these technologies might fit into creative processes. So I thought I’d share some thoughts on why we’re taking a different path.},
langid = {english},
note = {“What strikes me about AI-generated stories, dialogue, and characters is their inherent mimicry. These systems don’t create—they rearrange existing patterns from their training data.”
\par
“What troubles me most is how AI threatens to devalue human creativity by suggesting that the creative process itself—the vision, the intention, the evolution—doesn’t matter. When we pretend an AI can write “good enough” stories, we misunderstand what stories are: not just words arranged in pleasing patterns, but acts of human connection and meaning-making.”},
file = {/Users/sen/Zotero/storage/763IR3QF/genai.html}
}
@article{chiangChatGPTBlurryJPEG2023,
entrysubtype = {magazine},
title = {{ChatGPT} Is a Blurry {JPEG} of the Web},
author = {Chiang, Ted},
date = {2023-02-09},
journaltitle = {The New Yorker},
issn = {0028-792X},
url = {https://www.newyorker.com/tech/annals-of-technology/chatgpt-is-a-blurry-jpeg-of-the-web},
urldate = {2024-07-05},
abstract = {OpenAI’s chatbot offers paraphrases, whereas Google offers quotes. Which do we prefer?},
langid = {american},
keywords = {algorithms,Artificial Intelligence,artificial intelligence (a.i.),chatgpt,GenAI,images,internet,technology,writing},
file = {/Users/sen/Zotero/storage/23CST9S5/chatgpt-is-a-blurry-jpeg-of-the-web.html}
}
@online{chiangWhyIsnGoing2024,
title = {Why {A.I.} Isn’t Going to Make Art},
author = {Chiang, Ted},
date = {2024-09-02T12:53:53Z},
url = {https://www.newyorker.com/culture/the-weekend-essay/why-ai-isnt-going-to-make-art},
urldate = {2024-09-02},
organization = {The New Yorker},
note = {\href{https://archive.is/QVg0P}{Why A.I. Isn’t Going to Make Art | The New Yorker}
\par
Generative A.I. appeals to people who think they can express themselves in a medium without actually working in that medium.
\par
Using ChatGPT to complete assignments is like bringing a forklift into the weight room; you will never improve your cognitive fitness that way.
\par
\href{https://archive.is/QVg0P}{https://archive.is/QVg0P}},
file = {/Users/sen/Zotero/storage/65YTCS94/QVg0P.html}
}
@online{Chroma,
title = {Chroma},
url = {https://www.trychroma.com/},
urldate = {2024-10-24},
abstract = {Chroma is the open-source AI application database. Batteries included.},
langid = {english},
file = {/Users/sen/Zotero/storage/XQ4GLA5P/www.trychroma.com.html}
}
@article{cottomOpinionActuallyAI2025,
entrysubtype = {newspaper},
title = {Actually, {A.I.} Is Pretty Mid},
author = {Cottom, Tressie McMillan},
date = {2025-03-29},
journaltitle = {The New York Times},
issn = {0362-4331},
url = {https://www.nytimes.com/2025/03/29/opinion/ai-tech-innovation.html},
urldate = {2025-03-29},
abstract = {A.I. is just what we need in the post-fact era: less research and more predicting what we want to hear.},
journalsubtitle = {Opinion},
langid = {american},
keywords = {Artificial Intelligence,Colleges and Universities,Computers and the Internet,Innovation,Labor and Jobs,Productivity},
note = {We are using A.I. to make mediocre improvements, such as emailing more. Even the most enthusiastic papers about A.I.’s power to augment white-collar work have struggled to come up with something more exciting than “A brief that once took two days to write will now take two hours!”},
file = {/Users/sen/Zotero/storage/BK5325QX/ai-tech-innovation.html}
}
@online{daviesStupidologyWilliamDavies2025,
title = {Stupidology},
author = {Davies, William},
date = {2025-09-10T10:56:16-04:00},
url = {https://www.nplusonemag.com/issue-51/politics/stupidology/},
urldate = {2025-10-03},
abstract = {The challenge posed by this political crisis is how to take the stupidity seriously without reducing it to a wholly mental or psychiatric, let alone genetic, phenomenon. Stupidity can be understood as a problem of social systems rather than individuals, as André Spicer and Mats Alvesson explore in their book The Stupidity Paradox. Stupidity, they write, can become “functional,” a feature of how organizations operate on a daily basis, obstructing ideas and intelligence despite the palpable negative consequences. Yet it’s hard to identify anything functional about Trumpian stupidity, which is less a form of organizational inertia or disarray than a slash-and-burn assault on the very things{$\mkern1mu$}—{$\mkern1mu$}universities, public health, market data{$\mkern1mu$}—{$\mkern1mu$}that help make the world intelligible.},
langid = {american},
organization = {n+1},
file = {/Users/sen/Zotero/storage/6T2LP74D/stupidology.html}
}
@online{DayTaughtAI2025,
title = {The Day {I} Taught {AI} to Read Code like a Senior Developer},
date = {2025-01-05T00:00:00+00:00},
url = {https://nmn.gl/blog/ai-senior-developer},
urldate = {2025-01-06},
abstract = {A messy experiment that changed how we think about AI code analysis Last week, I watched our AI choke on a React codebase - again. As timeout errors flooded my terminal, something clicked. We’d been teaching AI to read code like a fresh bootcamp grad, not a senior developer. Here’s what I mean.},
langid = {english},
organization = {N’s Blog},
file = {/Users/sen/Zotero/storage/3GQKGPCG/ai-senior-developer.html}
}
@online{deepseek-aiDeepSeekV3TechnicalReport2024,
title = {{DeepSeek-V3} Technical Report},
author = {{DeepSeek-AI} and Liu, Aixin and Feng, Bei and Xue, Bing and Wang, Bingxuan and Wu, Bochao and Lu, Chengda and Zhao, Chenggang and Deng, Chengqi and Zhang, Chenyu and Ruan, Chong and Dai, Damai and Guo, Daya and Yang, Dejian and Chen, Deli and Ji, Dongjie and Li, Erhang and Lin, Fangyun and Dai, Fucong and Luo, Fuli and Hao, Guangbo and Chen, Guanting and Li, Guowei and Zhang, H. and Bao, Han and Xu, Hanwei and Wang, Haocheng and Zhang, Haowei and Ding, Honghui and Xin, Huajian and Gao, Huazuo and Li, Hui and Qu, Hui and Cai, J. L. and Liang, Jian and Guo, Jianzhong and Ni, Jiaqi and Li, Jiashi and Wang, Jiawei and Chen, Jin and Chen, Jingchang and Yuan, Jingyang and Qiu, Junjie and Li, Junlong and Song, Junxiao and Dong, Kai and Hu, Kai and Gao, Kaige and Guan, Kang and Huang, Kexin and Yu, Kuai and Wang, Lean and Zhang, Lecong and Xu, Lei and Xia, Leyi and Zhao, Liang and Wang, Litong and Zhang, Liyue and Li, Meng and Wang, Miaojun and Zhang, Mingchuan and Zhang, Minghua and Tang, Minghui and Li, Mingming and Tian, Ning and Huang, Panpan and Wang, Peiyi and Zhang, Peng and Wang, Qiancheng and Zhu, Qihao and Chen, Qinyu and Du, Qiushi and Chen, R. J. and Jin, R. L. and Ge, Ruiqi and Zhang, Ruisong and Pan, Ruizhe and Wang, Runji and Xu, Runxin and Zhang, Ruoyu and Chen, Ruyi and Li, S. S. and Lu, Shanghao and Zhou, Shangyan and Chen, Shanhuang and Wu, Shaoqing and Ye, Shengfeng and Ye, Shengfeng and Ma, Shirong and Wang, Shiyu and Zhou, Shuang and Yu, Shuiping and Zhou, Shunfeng and Pan, Shuting and Wang, T. and Yun, Tao and Pei, Tian and Sun, Tianyu and Xiao, W. L. and Zeng, Wangding and Zhao, Wanjia and An, Wei and Liu, Wen and Liang, Wenfeng and Gao, Wenjun and Yu, Wenqin and Zhang, Wentao and Li, X. Q. 
and Jin, Xiangyue and Wang, Xianzu and Bi, Xiao and Liu, Xiaodong and Wang, Xiaohan and Shen, Xiaojin and Chen, Xiaokang and Zhang, Xiaokang and Chen, Xiaosha and Nie, Xiaotao and Sun, Xiaowen and Wang, Xiaoxiang and Cheng, Xin and Liu, Xin and Xie, Xin and Liu, Xingchao and Yu, Xingkai and Song, Xinnan and Shan, Xinxia and Zhou, Xinyi and Yang, Xinyu and Li, Xinyuan and Su, Xuecheng and Lin, Xuheng and Li, Y. K. and Wang, Y. Q. and Wei, Y. X. and Zhu, Y. X. and Zhang, Yang and Xu, Yanhong and Xu, Yanhong and Huang, Yanping and Li, Yao and Zhao, Yao and Sun, Yaofeng and Li, Yaohui and Wang, Yaohui and Yu, Yi and Zheng, Yi and Zhang, Yichao and Shi, Yifan and Xiong, Yiliang and He, Ying and Tang, Ying and Piao, Yishi and Wang, Yisong and Tan, Yixuan and Ma, Yiyang and Liu, Yiyuan and Guo, Yongqiang and Wu, Yu and Ou, Yuan and Zhu, Yuchen and Wang, Yuduan and Gong, Yue and Zou, Yuheng and He, Yujia and Zha, Yukun and Xiong, Yunfan and Ma, Yunxian and Yan, Yuting and Luo, Yuxiang and You, Yuxiang and Liu, Yuxuan and Zhou, Yuyang and Wu, Z. F. and Ren, Z. Z. and Ren, Zehui and Sha, Zhangli and Fu, Zhe and Xu, Zhean and Huang, Zhen and Zhang, Zhen and Xie, Zhenda and Zhang, Zhengyan and Hao, Zhewen and Gou, Zhibin and Ma, Zhicheng and Yan, Zhigang and Shao, Zhihong and Xu, Zhipeng and Wu, Zhiyu and Zhang, Zhongyu and Li, Zhuoshu and Gu, Zihui and Zhu, Zijia and Liu, Zijun and Li, Zilin and Xie, Ziwei and Song, Ziyang and Gao, Ziyi and Pan, Zizheng},
date = {2024-12-27},
eprint = {2412.19437},
eprinttype = {arXiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2412.19437},
url = {http://arxiv.org/abs/2412.19437},
urldate = {2025-02-11},
abstract = {We present DeepSeek-V3, a strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token. To achieve efficient inference and cost-effective training, DeepSeek-V3 adopts Multi-head Latent Attention (MLA) and DeepSeekMoE architectures, which were thoroughly validated in DeepSeek-V2. Furthermore, DeepSeek-V3 pioneers an auxiliary-loss-free strategy for load balancing and sets a multi-token prediction training objective for stronger performance. We pre-train DeepSeek-V3 on 14.8 trillion diverse and high-quality tokens, followed by Supervised Fine-Tuning and Reinforcement Learning stages to fully harness its capabilities. Comprehensive evaluations reveal that DeepSeek-V3 outperforms other open-source models and achieves performance comparable to leading closed-source models. Despite its excellent performance, DeepSeek-V3 requires only 2.788M H800 GPU hours for its full training. In addition, its training process is remarkably stable. Throughout the entire training process, we did not experience any irrecoverable loss spikes or perform any rollbacks. The model checkpoints are available at https://github.com/deepseek-ai/DeepSeek-V3.},
pubstate = {prepublished},
version = {1},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computation and Language},
file = {/Users/sen/Zotero/storage/Z6QWVWF4/DeepSeek-AI et al. - 2024 - DeepSeek-V3 Technical Report.pdf;/Users/sen/Zotero/storage/D75UJW7D/2412.html}
}
@online{delaneyChatBadUI2025,
title = {Chat Is a Bad {UI} Pattern for Development Tools},
author = {De Laney, Daniel},
date = {2025-02-04},
url = {https://danieldelaney.net/chat/},
urldate = {2025-02-04},
file = {/Users/sen/Zotero/storage/M2IGYL2K/chat.html}
}
@online{DiveDeepLearning,
title = {Dive into Deep Learning},
author = {Zhang, Aston and Lipton, Zachary C. and Li, Mu and Smola, Alexander J.},
url = {https://d2l.ai/},
urldate = {2024-10-20},
file = {/Users/sen/Zotero/storage/ENWEV6CC/d2l.ai.html}
}
@article{doi:10.1126/science.176.4035.609,
title = {On the Impact of the Computer on Society},
author = {Weizenbaum, Joseph},
date = {1972},
journaltitle = {Science},
volume = {176},
number = {4035},
pages = {609--614},
doi = {10.1126/science.176.4035.609},
url = {https://www.science.org/doi/abs/10.1126/science.176.4035.609}
}
@online{dolonAmazonHasRuined2020,
title = {Amazon Has Ruined Search and {Google} Is in on It},
author = {Dolon, Monji},
date = {2020-01-21T14:15:00+00:00},
url = {https://monji.com/essays/amazon-has-ruined-search-and-google-is-in-on-it},
urldate = {2021-07-29},
abstract = {TL;DR -- Tried searching Google for best bicycles under \$500 and nearly all results were spammy review sites riddled with Amazon affiliate links.},
langid = {english},
organization = {Monji Dolon},
file = {/Users/sen/Zotero/storage/LN7NYX3F/amazon-has-ruined-search-and-google-is-in-on-it.html}
}
@article{Dreyfus2007-DREWHA,
title = {Why {Heideggerian} {AI} Failed and How Fixing It Would Require Making It More {Heideggerian}},
author = {Dreyfus, Hubert L.},
date = {2007},
journaltitle = {Philosophical Psychology},
volume = {20},
number = {2},
pages = {247--268},
doi = {10.1080/09515080701239510},
keywords = {programming},
file = {/Users/sen/Zotero/storage/PWGAI5BA/Dreyfus - 2007 - Why heideggerian ai failed and how fixing it would.pdf}
}
@book{dreyfusMindMachinePower2009,
title = {Mind over Machine: The Power of Human Intuition and Expertise in the Era of the Computer},
shorttitle = {Mind over Machine},
author = {Dreyfus, Hubert L. and Dreyfus, Stuart E.},
date = {2009},
edition = {Repr.},
publisher = {The Free Press},
location = {New York},
isbn = {978-0-7432-0551-1},
langid = {english},
pagetotal = {231}
}
@unpublished{dreyfusPhenomenologySkillAcquisitionmanuscript,
title = {A Phenomenology of Skill Acquisition as the Basis for a {Merleau-Pontian} Nonrepresentational Cognitive Science},
author = {Dreyfus, Hubert L.},
date = {2002},
note = {Unpublished manuscript},
keywords = {cognitive processes,productivity}
}
@online{drummondSiliconValleyHas2025,
title = {Silicon Valley Has a God Problem},
author = {Drummond, Joshua},
date = {2025-10-23T19:09:45},
url = {https://www.webworm.co/newgods/},
urldate = {2025-11-02},
abstract = {And we're all up for sacrifice.},
langid = {english},
organization = {Webworm},
file = {/Users/sen/Zotero/storage/J3DJC8M7/newgods.html}
}
@online{eeveeRiseWhatever2025,
title = {The Rise of {Whatever}},
author = {Eevee},
date = {2025-07-03T17:26:00-07:00},
url = {https://eev.ee/blog/2025/07/03/the-rise-of-whatever/},
urldate = {2025-07-06},
abstract = {This was originally titled “I miss when computers were fun”. But in the course of writing it, I discovered that there is a reason computers became less fun, a dark thread woven through a number of events in recent history. Let me back up a bit.},
langid = {english},
file = {/Users/sen/Zotero/storage/K82UBF7S/the-rise-of-whatever.html}
}
@online{epsteinYourBrainDoes2016,
title = {Your Brain Does Not Process Information and It Is Not a Computer},
author = {Epstein, Robert},
date = {2016-05-18},
url = {https://aeon.co/essays/your-brain-does-not-process-information-and-it-is-not-a-computer},
urldate = {2025-09-02},
abstract = {Your brain does not process information, retrieve knowledge or store memories. In short: your brain is not a computer},
langid = {english},
organization = {Aeon},
keywords = {genai},
file = {/Users/sen/Zotero/storage/9BI6P8V8/your-brain-does-not-process-information-and-it-is-not-a-computer.html}
}
@online{FAISS,
title = {{FAISS}},
url = {https://ai.meta.com/tools/faiss},
urldate = {2024-09-27},
abstract = {A library that allows developers to quickly search for embeddings of multimedia documents that are similar to each other.},
langid = {english},
organization = {{Meta AI}},
file = {/Users/sen/Zotero/storage/FG7MPYK2/faiss.html}
}
@online{FightingAIScraperbot,
title = {Fighting the {AI} Scraperbot Scourge},
author = {Corbet, Jonathan},
date = {2025-02-14},
url = {https://lwn.net/Articles/1008897/},
urldate = {2025-02-27},
organization = {{LWN.net}},
file = {/Users/sen/Zotero/storage/5DWUTUPJ/1008897.html}
}
@inproceedings{foggHowUsersReciprocate1997,
title = {How Users Reciprocate to Computers: {An} Experiment That Demonstrates Behavior Change},
booktitle = {{CHI} '97 Extended Abstracts on Human Factors in Computing Systems},
author = {family=Fogg, given=BJ, given-i=BJ and Nass, Clifford},
date = {1997},
series = {{CHI EA} '97},
pages = {331--332},
publisher = {Association for Computing Machinery},
location = {New York, NY, USA},
doi = {10.1145/1120212.1120419},
url = {https://doi.org/10.1145/1120212.1120419},
abstract = {We conducted an experiment to investigate if computers could motivate users to change their behavior. By leveraging a social dynamic called the "rule of reciprocity," this experiment demonstrated that users provided more helping behavior to a computer that had helped them previously than to a different computer. Users also worked longer, performed higher quality work, and felt happier. Conversely, the data provide evidence of a retaliation effect.},
isbn = {0-89791-926-2},
pagetotal = {2},
keywords = {agents,computers are social actors,empirical studies,experiments,influence,media equation,persuasion,programming,reciprocity,retaliation,social dynamics}
}
@online{fosterAIIsntPeople2026,
title = {A.{{I}}. {{Isn}}'t {{People}}},
author = {Foster, Rusty},
date = {2026-02-23},
url = {https://www.todayintabs.com/p/a-i-isn-t-people},
urldate = {2026-02-28},
abstract = {How many Reddit posts does it take to learn to read?},
langid = {english},
organization = {Today in Tabs},
note = {I don’t need to know what mechanism underlies human intelligence to rule out the possibility that it’s the same as what a large language model does.},
file = {/Users/sen/Zotero/storage/KFULDJZS/a-i-isn-t-people.html}
}
@online{gajdosYouShouldProbably2024,
title = {You {{Should Probably Pay Attention}} to {{Tokenizers}}},
author = {Gajdos, Milos},
date = {2024-10-21},
url = {https://cybernetist.com/2024/10/21/you-should-probably-pay-attention-to-tokenizers/},
urldate = {2024-10-23},
abstract = {Last week I was helping a friend of mine to get one of his new apps off the ground. I can’t speak much about it at the moment, other than like most apps nowadays it has some AI sprinkled over it. Ok, maybe a bit maybe more just a bit – depends on the way you look at it, I suppose. There is a Retrieval-augmented generation (RAG) hiding somewhere in most of the AI apps. RAG is still all the RAGe – it even has its own Wikipedia page now! I’m not sure if anyone is tracking how fast a term reaches the point where it gets its own Wiki page but RAG must be somewhere near the top of the charts.},
langid = {english},
organization = {Cybernetist},
file = {/Users/sen/Zotero/storage/3TPQJ92R/you-should-probably-pay-attention-to-tokenizers.html}
}
@online{garfieldSelfishAIGarfieldTech,
title = {Selfish {{AI}} | {{GarfieldTech}}},
author = {Garfield, Larry},
date = {2026-02-01},
url = {https://www.garfieldtech.com/blog/selfish-ai},
urldate = {2026-02-04}
}
@online{gellyWhatHappenedWhen2025,
title = {What {{Happened When I Tried}} to {{Replace Myself}} with {{ChatGPT}} in {{My English Classroom}}},
author = {Gelly, Piers},
date = {2025-07-28T08:59:18+00:00},
url = {https://lithub.com/what-happened-when-i-tried-to-replace-myself-with-chatgpt-in-my-english-classroom/},
urldate = {2025-08-03},
abstract = {My students call it “Chat,” a cute nickname they all seem to have agreed on at some point. They use it to make study guides, interpret essay prompts, and register for classes, turning it loose on t…},
langid = {american},
organization = {Literary Hub},
keywords = {A Black Story May Contain Sensitive Content,AI,ChatGPT,creative writing,English,Lilian-Yvonne Bertram,Piers Gelly,Searches: Selfhood in the Digital Age,teaching,Vauhini Vara}
}
@online{GenerativeAIAdoption,
title = {Generative {{AI Adoption Toolkit}}},
url = {https://www.pluralsight.com/product/flow/developer-success-lab/dsl-navigate-toolkit},
urldate = {2025-02-04},
abstract = {Harness the Developer Success Lab’s findings around AI Skill Threat, Identity Change and Developer Thriving to help your teams adopt AI-assisted coding tools and practices in a human-centered, evidence-based way.},
langid = {english},
note = {Moved to \href{https://www.pluralsight.com/resources/blog/ai-and-data/generative-ai-toolkit}{https://www.pluralsight.com/resources/blog/ai-and-data/generative-ai-toolkit}},
file = {/Users/sen/Zotero/storage/NBDAH5YC/dsl-navigate-toolkit.html}
}
@online{gentAIMisinformationBullshit2025,
title = {{{AI Misinformation}}: {{The Bullshit Index Explained}} - {{IEEE Spectrum}}},
shorttitle = {{{AI Misinformation}}},
author = {Gent, Edd},
date = {2025-08-12},
url = {https://spectrum.ieee.org/ai-misinformation-llm-bullshit},
urldate = {2025-08-13},
abstract = {Explore how AI misinformation arises from models' indifference to truth, driven by reinforcement learning techniques.},
langid = {english},
file = {/Users/sen/Zotero/storage/F8TK26BI/ai-misinformation-llm-bullshit.html}
}
@online{gerardGenerativeAIRuns2025,
title = {Generative {{AI}} Runs on Gambling Addiction — Just One More Prompt, Bro!},
author = {Gerard, David},
date = {2025-06-05T17:29:04+00:00},
url = {https://pivot-to-ai.com/2025/06/05/generative-ai-runs-on-gambling-addiction-just-one-more-prompt-bro/},
urldate = {2025-06-06},
abstract = {You’ll have noticed how previously normal people start acting like addicts to their favourite generative AI and shout at you like you’re trying to take their cocaine away. Matthias Döpm…},
langid = {british},
organization = {Pivot to AI},
file = {/Users/sen/Zotero/storage/TNDUZ48J/generative-ai-runs-on-gambling-addiction-just-one-more-prompt-bro.html}
}
@article{gettierJustifiedTrueBelief1963,
title = {Is {{Justified True Belief Knowledge}}?},
author = {Gettier, Edmund L.},
date = {1963-06-01},
journaltitle = {Analysis},
shortjournal = {Analysis},
volume = {23},
number = {6},
pages = {121--123},
issn = {0003-2638, 1467-8284},
doi = {10.1093/analys/23.6.121},
url = {https://academic.oup.com/analysis/article-lookup/doi/10.1093/analys/23.6.121},
urldate = {2022-07-09},
langid = {english},
keywords = {cognitive processes,programming},
file = {/Users/sen/Zotero/storage/WHP8EVTY/Gettier - 1963 - Is Justified True Belief Knowledge.pdf}
}
@online{guestUncriticalAdoptionAI2025,
title = {Against the {{Uncritical Adoption}} of '{{AI}}' {{Technologies}} in {{Academia}}},
author = {Guest, Olivia and Suarez, Marcela and Müller, Barbara and family=Meerkerk, given=Edwin, prefix=van, useprefix=true and Oude Groote Beverborg, Arnoud and family=Haan, given=Ronald, prefix=de, useprefix=true and Reyes Elizondo, Andrea and Blokpoel, Mark and Scharfenberg, Natalia and Kleinherenbrink, Annelies and Camerino, Ileana and Woensdregt, Marieke and Monett, Dagmar and Brown, Jed and Avraamidou, Lucy and Alenda-Demoutiez, Juliette and Hermans, Felienne and family=Rooij, given=Iris, prefix=van, useprefix=true},
date = {2025-09-05},
eprinttype = {Zenodo},
doi = {10.5281/ZENODO.17065098},
url = {https://zenodo.org/doi/10.5281/zenodo.17065098},
urldate = {2025-09-25},
abstract = {Under the banner of progress, products have been uncritically adopted or even imposed on users — in past centuries with tobacco and combustion engines, and in the 21st with social media. For these collective blunders, we now regret our involvement or apathy as scientists, and society struggles to put the genie back in the bottle. Currently, we are similarly entangled with artificial intelligence (AI) technology. For example, software updates are rolled out seamlessly and non-consensually, Microsoft Office is bundled with chatbots, and we, our students, and our employers have had no say, as it is not considered a valid position to reject AI technologies in our teaching and research. This is why in June 2025, we co-authored an Open Letter calling on our employers to reverse and rethink their stance on uncritically adopting AI technologies. In this position piece, we expound on why universities must take their role seriously to a) counter the technology industry's marketing, hype, and harm; and to b) safeguard higher education, critical thinking, expertise, academic freedom, and scientific integrity. We include pointers to relevant work to further inform our colleagues.},
pubstate = {prepublished},
keywords = {artificial intelligence,critical analysis,digital technology,higher education,open letter,policy}
}
@online{halifaxRoshiJoanHalifax2023,
title = {Roshi {{Joan Halifax}}: {{What Has AI Got To Do}} with {{Buddhism}}?},
shorttitle = {Roshi {{Joan Halifax}}},
author = {Halifax, Joan},
date = {2023-10-31T16:46:18+00:00},
url = {https://www.upaya.org/2023/10/roshi-joan-halifax-what-has-ai-got-to-do-with-buddhism/},
urldate = {2025-01-31},
abstract = {A talk given at the Wisdom 2.0 Summit on October 30, 2023. We are in a very different world than when I first looked at a computer printout in 1965. Today, data processing, information, disinformation…},
langid = {american},
organization = {Upaya Zen Center},
keywords = {genai,zen},
file = {/Users/sen/Zotero/storage/WIRMRNBT/roshi-joan-halifax-what-has-ai-got-to-do-with-buddhism.html}
}
@online{hebertGapWhichWe2025,
title = {The {{Gap Through Which We Praise}} the {{Machine}}},
author = {Hebert, Fred},
date = {2025-06-09},
url = {https://ferd.ca/the-gap-through-which-we-praise-the-machine.html},
urldate = {2025-06-09},
abstract = {My current theory of agentic programming: people are amazing at adapting the tools they're given and totally underestimate the extent to which they do it, and the amount of skill we build doing that is an incidental consequence of how badly the tools are designed.},
langid = {english},
file = {/Users/sen/Zotero/storage/I9TWY5PA/the-gap-through-which-we-praise-the-machine.html}
}
@article{hicksChatGPTBullshit2024,
title = {{{ChatGPT}} Is Bullshit},
author = {Hicks, Michael Townsen and Humphries, James and Slater, Joe},
date = {2024-06-08},
journaltitle = {Ethics and Information Technology},
shortjournal = {Ethics Inf Technol},
volume = {26},
number = {2},
pages = {38},
issn = {1572-8439},
doi = {10.1007/s10676-024-09775-5},
url = {https://doi.org/10.1007/s10676-024-09775-5},
urldate = {2024-07-18},
abstract = {Recently, there has been considerable interest in large language models: machine learning systems which produce human-like text and dialogue. Applications of these systems have been plagued by persistent inaccuracies in their output; these are often called “AI hallucinations”. We argue that these falsehoods, and the overall activity of large language models, is better understood as bullshit in the sense explored by Frankfurt (On Bullshit, Princeton, 2005): the models are in an important way indifferent to the truth of their outputs. We distinguish two ways in which the models can be said to be bullshitters, and argue that they clearly meet at least one of these definitions. We further argue that describing AI misrepresentations as bullshit is both a more useful and more accurate way of predicting and discussing the behaviour of these systems.},
langid = {english},
keywords = {Artificial intelligence,Assertion,Bullshit,ChatGPT,Content,Frankfurt,Large language models,LLMs},
file = {/Users/sen/Zotero/storage/7HB7U23Z/Hicks et al. - 2024 - ChatGPT is bullshit.pdf}
}
@online{hicksCognitiveHelmetsAI2025,
title = {Cognitive {{Helmets}} for the {{AI Bicycle}}: {{Part}} 1},
shorttitle = {Cognitive {{Helmets}} for the {{AI Bicycle}}},
author = {Hicks, Cat},
date = {2025-10-18T22:13:47},
url = {https://www.fightforthehuman.com/cognitive-helmets-for-the-ai-bicycle-part-1/},
urldate = {2026-02-05},
abstract = {I hear people name these three fears: will developers lose their problem-solving skills, learning opportunities, and critical thinking? One science-backed area can help: better metacognitive strategies.},
langid = {english},
organization = {Fight for the Human},
file = {/Users/sen/Zotero/storage/WIZTATPC/cognitive-helmets-for-the-ai-bicycle-part-1.html}
}
@online{hicksNewDeveloperAI2024,
title = {The {{New Developer}}: {{AI Skill Threat}}, {{Identity Change}} \& {{Developer Thriving}} in the {{Transition}} to {{AI-Assisted Software Development}}},
shorttitle = {The {{New Developer}}},
author = {Hicks, Catherine and Lee, Carol and Foster-Marks, Kristen},
date = {2024-04-20},
eprinttype = {OSF},
doi = {10.31234/osf.io/2gej5},
url = {https://osf.io/2gej5_v1},
urldate = {2025-05-28},
abstract = {In this research study, we share original empirical research with 3000+ software engineers and developers across 12+ industries engaged in the transition to Generative AI-assisted software work. We bring a human-centered approach to pressing questions that engineering organizations are facing on the rapidly-changing possibilities of AI-assisted coding. How are developers impacted by changing demands on their roles? Where might there be emerging equity \& opportunity gaps in who has access to these new development capabilities? What are the risks to the quality of technical work, and the developer productivity, thriving, and motivation which drive that technical work? From this work we present a new evidence-based framework to help developers, engineering managers, and leaders as they grapple with failure to thrive in the transition to AI-assisted work: AI Skill Threat. AI Skill Threat describes developers’ fear, anxiety, and worry that their current skills will quickly become obsolete as they adapt to AI-assisted coding. Our framework also predicts when and why AI Skill Threat emerges: engineers who maintain a strong belief in competition and the demonstration of “innate brilliance” are more likely to report AI Skill Threat. However, engineers who report the presence of learning cultures and belonging are less likely to report AI Skill Threat. Finally, we document new and emerging equity and opportunity gaps for software teams adopting new tooling practices. AI Skill Threat is higher for Racially Minoritized developers, who also rate the overall quality of AI-assisted coding outputs significantly lower. Both female developers and LGBTQ+ developers were significantly less likely to report plans to upskilling for new AI-assisted workflows. 
These and other emerging differences point toward a critical need to understand how organizations ensure that AI-assisted coding adoption is equitable and accessible, and that key insights from developers with important perspectives on the risks of AI-assisted coding are heard.},
langid = {american},
pubstate = {prepublished},
keywords = {belonging,emerging technologies,equity gaps,generative AI,learning,software developers,software development,software team practices,team health,technology teams},
file = {/Users/sen/Zotero/storage/M4X66DAS/Hicks et al. - 2024 - The New Developer AI Skill Threat, Identity Chang.pdf}
}
@online{hoelWelcomeSemanticApocalypse2022,
title = {Welcome to the Semantic Apocalypse},
author = {Hoel, Erik},
date = {2022-03-16},
url = {https://www.theintrinsicperspective.com/p/welcome-to-the-semantic-apocalypse},
urldate = {2025-04-01},
abstract = {Studio Ghibli style and the draining of meaning},
langid = {english},
file = {/Users/sen/Zotero/storage/2LME855A/welcome-to-the-semantic-apocalypse.html}
}
@online{HowArgueAI2025,
title = {How {{To Argue With An AI Booster}}},
date = {2025-08-25T15:39:01},
url = {https://www.wheresyoured.at/how-to-argue-with-an-ai-booster/},
urldate = {2025-08-25},
abstract = {Editor's Note: For those of you reading via email, I recommend opening this in a browser so you can use the Table of Contents. This is my longest newsletter - a 16,000-word-long opus - and if you like it, please subscribe to my premium newsletter. Thanks for reading! In},
langid = {english},
organization = {Ed Zitron's Where's Your Ed At},
keywords = {genai},
file = {/Users/sen/Zotero/storage/M2Y5NMKF/how-to-argue-with-an-ai-booster.html}
}
@online{howellServeManRelationship2023,
title = {To {{Serve Man}} - {{The Relationship}} of {{Humanity}} to {{Technology}}},
author = {Howell, C.W.},
date = {2023-06-11T09:45:16},
url = {https://www.cwhowell.com/to-serve-man/},
urldate = {2025-05-11},
abstract = {An explanation for this site. We can neither embrace technology uncritically, nor renounce it totally.},
langid = {english},
organization = {C.W. Howell},
file = {/Users/sen/Zotero/storage/F2TW2B4D/to-serve-man.html}
}
@online{InformationGreyGoo2024,
title = {The Information Grey Goo},
date = {2024-01-24T17:04:08},
url = {https://www.ianbetteridge.com/the-information-grey-goo/},
urldate = {2026-02-10},
abstract = {I’m broadly positive about the future of LLMs and AI, but no one should pretend there will not be difficulties or that the transition to using machines isn’t going to pose plenty of challenges.~ Some scenarios, though, are~profoundly~dangerous, not just for the publishing and creative industries,},
langid = {english},
organization = {Ian Betteridge},
file = {/Users/sen/Zotero/storage/CDNCPF9Y/the-information-grey-goo.html}
}
@online{jensonTimmyTrapScott2025,
title = {The {{Timmy Trap}} – {{Scott Jenson}}},
author = {Jenson, Scott},
date = {2025-08-10},
url = {https://jenson.org/timmy/},
urldate = {2025-08-13},
langid = {american},
note = {Part II of a series. Part I is \href{https://jenson.org/hype/}{Hype is a Business Tool}},
file = {/Users/sen/Zotero/storage/5939HZ9G/timmy.html}
}
@book{johnsonEmergenceConnectedLives2001,
title = {Emergence: The Connected Lives of Ants, Brains, Cities, and Software},
shorttitle = {Emergence},
author = {Johnson, Steven},
date = {2001},
publisher = {Scribner},
location = {New York},
abstract = {Emergence is what happens when an interconnected system of relatively simple elements self-organizes to form more intelligent, more adaptive higher-level behavior. It's a bottom-up model rather than being engineered by a general or a master planner, emergence begins at the ground level. Systems that at first glance seem vastly different--ant colonies, human brains, cities, immune systems--all turn out to follow the rules of emergence. In each of these systems, agents residing on one scale start producing behavior that lies a scale above them: ants create colonies, urbanites create neighborhoods. Author Steven Johnson takes readers on an eye-opening intellectual journey from the discovery of emergence to its applications. He introduces us to our everyday surroundings, offering surprising examples of feedback, self-organization, and adaptive learning. Drawing upon evolutionary theory, urban studies, neuroscience, and computer games, Emergence is a guidebook to one of the key components of twenty-first-century culture. Until recently, Johnson explains, the disparate philosophers of emergence have worked to interpret the world. But today they are starting to change it. This book is the riveting story of that change and what it means for the future.},
isbn = {978-0-684-86875-2 978-0-684-86876-9},
langid = {english},
keywords = {emergent systems,programming,software},
annotation = {OCLC: 46858386}
}
@online{jollimoreUsedTeachStudents2025,
title = {I {{Used}} to {{Teach Students}}. {{Now I Catch ChatGPT Cheats}} | {{The Walrus}}},
author = {Jollimore, Troy},
date = {2025-03-05T11:30:29+00:00},
url = {https://thewalrus.ca/i-used-to-teach-students-now-i-catch-chatgpt-cheats/},
urldate = {2025-03-07},
abstract = {I once believed university was a shared intellectual pursuit. That faith has been obliterated},
langid = {american},
note = {That moment, when you start to understand the power of clear thinking, is crucial. The trouble with generative AI is that it short-circuits that process entirely.},
file = {/Users/sen/Zotero/storage/32AFZKUW/i-used-to-teach-students-now-i-catch-chatgpt-cheats.html}
}
@inproceedings{leeImpactGenerativeAI2025,
title = {The {{Impact}} of {{Generative AI}} on {{Critical Thinking}}: {{Self-Reported Reductions}} in {{Cognitive Effort}} and {{Confidence Effects From}} a {{Survey}} of {{Knowledge Workers}}},
shorttitle = {The {{Impact}} of {{Generative AI}} on {{Critical Thinking}}},
author = {Lee, Hao-Ping (Hank) and Sarkar, Advait and Tankelevitch, Lev and Drosos, Ian and Rintel, Sean and Banks, Richard and Wilson, Nicholas},
date = {2025-04-01},
url = {https://www.microsoft.com/en-us/research/publication/the-impact-of-generative-ai-on-critical-thinking-self-reported-reductions-in-cognitive-effort-and-confidence-effects-from-a-survey-of-knowledge-workers/},
urldate = {2025-02-16},
abstract = {The rise of Generative AI (GenAI) in knowledge workflows raises questions about its impact on critical thinking skills and practices. We survey 319 knowledge workers to investigate 1) when and how they perceive the enaction of critical thinking when using GenAI, and 2) when and why GenAI affects their effort to do so. Participants shared […]},
eventtitle = {Proceedings of the {{ACM CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
langid = {american},
file = {/Users/sen/Zotero/storage/N93KXNFU/Lee et al. - 2025 - The Impact of Generative AI on Critical Thinking .pdf}
}
@online{lefkowitzThinkImDone,
title = {I {{Think I}}’m {{Done Thinking About genAI For Now}}},
author = {Lefkowitz, Glyph},
date = {2025-06-04},
url = {https://blog.glyph.im/2025/06/i-think-im-done-thinking-about-genai-for-now.html},
urldate = {2026-02-10},
abstract = {The conversation isn’t over, but I don’t think I have much to add to it.},
langid = {english},
file = {/Users/sen/Zotero/storage/3GHW49JR/i-think-im-done-thinking-about-genai-for-now.html}
}
@online{maibergMicrosoftStudyFinds2025,
title = {Microsoft {{Study Finds AI Makes Human Cognition}} “{{Atrophied}} and {{Unprepared}}”},
author = {Maiberg, Emanuel},
date = {2025-02-10T15:26:45},
url = {https://www.404media.co/microsoft-study-finds-ai-makes-human-cognition-atrophied-and-unprepared-3/},
urldate = {2025-02-20},
abstract = {Researchers find that the more people use AI at their job, the less critical thinking they use.},
langid = {english},
organization = {404 Media},
file = {/Users/sen/Zotero/storage/7CG6BX2L/microsoft-study-finds-ai-makes-human-cognition-atrophied-and-unprepared-3.html}
}
@misc{marchal2024generativeaimisusetaxonomy,
title = {Generative {{AI}} Misuse: A Taxonomy of Tactics and Insights from Real-World Data},
author = {Marchal, Nahema and Xu, Rachel and Elasmar, Rasmi and Gabriel, Iason and Goldberg, Beth and Isaac, William},
date = {2024},
eprint = {2406.13843},
eprinttype = {arXiv},
eprintclass = {cs.AI},
url = {https://arxiv.org/abs/2406.13843},
keywords = {Artificial Intelligence,artificial intelligence (a.i.),GenAI}
}
@online{marcusDeepLearningHitting2022,
title = {Deep {{Learning Is Hitting}} a {{Wall}}},
author = {Marcus, Gary},
date = {2022-03-10T07:05:32+00:00},
url = {https://nautil.us/deep-learning-is-hitting-a-wall-238440/},
urldate = {2025-08-10},
abstract = {What would it take for artificial intelligence to make real progress?},
langid = {american},
organization = {Nautilus},
note = {\href{https://archive.ph/6hEYS}{https://archive.ph/6hEYS}},
file = {/Users/sen/Zotero/storage/LZELKBC4/deep-learning-is-hitting-a-wall-238440.html}
}
@online{marcusGPT5OverdueOverhyped2025,
type = {Substack newsletter},
title = {{{GPT-5}}: {{Overdue}}, Overhyped and Underwhelming. {{And}} That’s Not the Worst of It.},
shorttitle = {{{GPT-5}}},
author = {Marcus, Gary},
date = {2025-08-09},
url = {https://garymarcus.substack.com/p/gpt-5-overdue-overhyped-and-underwhelming},
urldate = {2025-08-10},
abstract = {A new release botched … and a breaking research new paper that spells trouble},
organization = {Marcus on AI},
file = {/Users/sen/Zotero/storage/FMEBVAGY/gpt-5-overdue-overhyped-and-underwhelming.html}
}
@online{marcusHumanitysOhShit2024,
type = {Substack newsletter},
title = {Humanity’s “{{Oh}} Shit!” {{AI}} Moment?},
author = {Marcus, Gary},
date = {2024-12-12},
url = {https://garymarcus.substack.com/p/humanitys-oh-shit-ai-moment},
urldate = {2024-12-30},
abstract = {Not yet, but it could come sooner than you think. Not because we are close to AGI, but because we already have machines that can say one thing and do something else altogether.},
organization = {Marcus on AI},
keywords = {Artificial Intelligence}
}
@online{marcusThatOpenAIBreakthrough2023,
type = {Substack newsletter},
title = {About That {{OpenAI}} “Breakthrough”},
author = {Marcus, Gary},
date = {2023-11-23},
url = {https://garymarcus.substack.com/p/about-that-openai-breakthrough},
urldate = {2023-11-23},
abstract = {Will OpenAI’s Q* change the world?},
organization = {Marcus on AI},
keywords = {programming},
file = {/Users/sen/Zotero/storage/CCGJIU67/about-that-openai-breakthrough.html}
}
@online{martsinovichItsRudeShow,
title = {It's Rude to Show {{AI}} Output to People | {{Alex Martsinovich}}},
author = {Martsinovich, Alex},
date = {2025-07-04},
url = {https://distantprovince.by/posts/its-rude-to-show-ai-output-to-people/},
urldate = {2025-12-09},
abstract = {Feeding slop is an act of war},
langid = {english},
file = {/Users/sen/Zotero/storage/TABEWEQ4/its-rude-to-show-ai-output-to-people.html}
}
@online{mcmahonBubbleTrouble2025,
title = {Bubble {{Trouble}}},
author = {McMahon, Bryan},
date = {2025-03-25T05:30:00},
url = {https://prospect.org/api/content/3a056fc2-090c-11f0-9b9f-12163087a831/},
urldate = {2025-03-25},
abstract = {An AI bubble threatens Silicon Valley, and all of us.},
langid = {american},
organization = {The American Prospect},
note = {For many workers, AI tools reduce their productivity by increasing the volume of steps needed to complete a given task.},
file = {/Users/sen/Zotero/storage/3RY73LHT/2025-03-25-bubble-trouble-ai-threat.html}
}
@online{mohamedMagicNumbers2022,
title = {Magic {{Numbers}}},
author = {Mohamed, Alana},
date = {2022-05-12},
url = {https://reallifemag.com/magic-numbers/},
urldate = {2022-05-14},
abstract = {Treating "the algorithm" as a kind of~divine~power misunderstands where algorithmic power comes from},
organization = {Real Life},
keywords = {distractories},
file = {/Users/sen/Zotero/storage/ZU3AF2F8/magic-numbers.html}
}
@online{monteiroHowSurviveWeight2025,
title = {How to Survive the Weight of an Entire Industry Trying to Convince You That You're Inadequate},
author = {Monteiro, Mike},
date = {2025-05-28},
url = {https://buttondown.com/monteiro/archive/how-to-survive-the-weight-of-an-entire-industry/},
urldate = {2025-07-20},
abstract = {Look at all this evidence of humanity! If you enjoy the newsletter gimme \$2. This week’s question comes to us from DB: How do I make art without feeling...},
langid = {english},
file = {/Users/sen/Zotero/storage/X9JQJGYI/how-to-survive-the-weight-of-an-entire-industry.html}
}
@online{musicnotesAIResearchersWrong2025,
title = {{{AI}} Researchers' Wrong Theory of Cognition Is Making Us Worry about the Wrong Kind of {{AI}} Apocalypse},
author = {{Exit Music (notes)}},
date = {2025-12-05T20:38:38Z},
url = {https://exitmusic.world/ai-researchers-wrong-theory-of-cognition-is-making-us-worry-about-the-wrong},
urldate = {2025-12-06},
abstract = {I originally wrote this (in 18 minutes!) as a stream-of-consciousness Mastodon thread. Thought it might be worth putting it all together ...},
langid = {english},
organization = {Exit Music (notes)},
keywords = {genai},
file = {/Users/sen/Zotero/storage/F2AQZT9Q/ai-researchers-wrong-theory-of-cognition-is-making-us-worry-about-the-wrong.html}
}
@online{narayananAINormalTechnology2025,
title = {{{AI}} as {{Normal Technology}}},
author = {Narayanan, Arvind and Kapoor, Sayash},
date = {2025-04-15},
url = {http://knightcolumbia.org/content/ai-as-normal-technology},
urldate = {2025-04-17},
langid = {english},
organization = {Knight First Amendment Institute}
}
@online{nastruzziSemanticAblationWhy2026,
title = {Semantic Ablation: {{Why AI}} Writing Is Boring and Dangerous},
shorttitle = {Semantic Ablation},
author = {Nastruzzi, Claudio},
date = {2026-02-16},
url = {https://www.theregister.com/2026/02/16/semantic_ablation_ai_writing/},
urldate = {2026-02-19},
abstract = {opinion: The subtractive bias we're ignoring},
langid = {english},
organization = {The Register},
file = {/Users/sen/Zotero/storage/PDJ5K6WH/semantic_ablation_ai_writing.html}
}
@online{nathanUnbundlingGraphGraphRAG2024,
title = {Unbundling the {{Graph}} in {{GraphRAG}}},
author = {Nathan, Paco},
date = {2024-11-19T06:30:46-05:00},
url = {https://www.oreilly.com/radar/unbundling-the-graph-in-graphrag/},
urldate = {2024-12-05},
langid = {american},
organization = {O’Reilly Media},
file = {/Users/sen/Zotero/storage/T7K6C7AR/unbundling-the-graph-in-graphrag.html}
}
@online{nervigWhatAIMeans2024,
title = {What {{AI Means}} for {{Buddhism}}},
author = {Nervig, Ross},
date = {2024-03-29},
url = {https://www.lionsroar.com/what-a-i-means-for-buddhism/},
urldate = {2025-05-28},
abstract = {AI can articulate the sum total of human knowledge, but can it help us cultivate wisdom and compassion—or is it a danger on the spiritual path? Ross Nervig investigates.},
langid = {american},
organization = {Lion’s Roar},
keywords = {buddhism,genai},
file = {/Users/sen/Zotero/storage/DZIPXJEC/what-a-i-means-for-buddhism.html}
}
@inproceedings{NEURIPS2023_adc98a26,
title = {Are Emergent Abilities of Large Language Models a Mirage?},
booktitle = {Advances in Neural Information Processing Systems},
author = {Schaeffer, Rylan and Miranda, Brando and Koyejo, Sanmi},
editor = {Oh, A. and Naumann, T. and Globerson, A. and Saenko, K. and Hardt, M. and Levine, S.},
date = {2023},
volume = {36},
pages = {55565--55581},
publisher = {Curran Associates, Inc.},
url = {https://proceedings.neurips.cc/paper_files/paper/2023/file/adc98a266f45005c403b8311ca7e8bd7-Paper-Conference.pdf},
file = {/Users/sen/Zotero/storage/ATV2U8N4/Schaeffer et al. - 2023 - Are emergent abilities of large language models a .pdf}
}
@online{ngFourAIAgent2024,
title = {Four {{AI Agent Strategies That Improve GPT-4}} and {{GPT-3}}.5 {{Performance}}},
author = {Ng, Andrew},
date = {2024-03-20T14:45:48.000-07:00},
url = {https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/},
urldate = {2024-12-30},
abstract = {I think AI agent workflows will drive massive AI progress this year — perhaps even more than the next generation of foundation models. This is an important...},
langid = {english},
organization = {DeepLearning.AI},
file = {/Users/sen/Zotero/storage/PIBAX948/how-agents-can-improve-llm-performance.html}
}
@article{niederhofferAIGeneratedWorkslopDestroying2025,
entrysubtype = {magazine},
title = {{{AI-Generated}} “{{Workslop}}” {{Is Destroying Productivity}}},
author = {Niederhoffer, Kate and Kellerman, Gabriella Rosen and Lee, Angela and Liebscher, Alex and Rapuano, Kristina and Hancock, Jeffrey T.},
date = {2025-09-22},
journaltitle = {Harvard Business Review},
issn = {0017-8012},
url = {https://hbr.org/2025/09/ai-generated-workslop-is-destroying-productivity?ab=HP-hero-featured-1},
urldate = {2025-09-24},
langid = {english},
keywords = {genai},
file = {/Users/sen/Zotero/storage/HQLCX284/ai-generated-workslop-is-destroying-productivity.html}
}
@online{NoAINot2025,
title = {No, {{AI}} Is Not {{Making Engineers}} 10x as {{Productive}}},
date = {2025-08-05},
url = {https://colton.dev/blog/curing-your-ai-10x-engineer-imposter-syndrome/?ref=wheresyoured.at},
urldate = {2025-10-20},
abstract = {Curing Your AI 10x Engineer Imposter Syndrome},
langid = {english},
file = {/Users/sen/Zotero/storage/6MC933WZ/curing-your-ai-10x-engineer-imposter-syndrome.html}
}
@online{nortonWhatWeTalk2025,
title = {What {{We Talk About When We Talk About AI}} ({{Part}} One)},
author = {Norton, Quinn},
date = {2025-02-14T12:42:11+00:00},
url = {https://www.emptywheel.net/2025/02/14/what-we-talk-about-when-we-talk-about-ai-part-one/},
urldate = {2025-02-14},
abstract = {A Normal Person’s Explainer on What Generative AI is and Does Part 1 – In the Beginning was the Chatbot “Are you comfortably seated? Yes, well, let’s begin.” *Clears throat theatrically* “Our experience, in natural theology, can never furnish a true and demonstrated science, because, like the discipline of practical reason, it can not take […]},
langid = {american},
organization = {emptywheel},
file = {/Users/sen/Zotero/storage/KZRCJEED/what-we-talk-about-when-we-talk-about-ai-part-one.html}
}
@online{nortonWhatWeTalk2025a,
title = {What {{We Talk About When We Talk About AI}} ({{Part Two}})},
author = {Norton, Quinn},
date = {2025-02-21T13:34:29+00:00},
url = {https://www.emptywheel.net/2025/02/21/what-we-talk-about-when-we-talk-about-ai-part-two/},
urldate = {2025-02-21},
abstract = {The Other Half of the AI relationship Part 2: Pareidolia as a Service When trying to understand AI, and in particular Large Language Models, we spend a lot of time concentrating on their architectures, structures, and outputs. We look at them with a technical eye. We peer so close and attentively to AI that we […]},
langid = {american},
organization = {emptywheel},
note = {Quotes
\par
\subsubsection{Pareidolia as a Service}
\par
“next word Markov chain running on top of a lot of vector math and statistics.”
\par
“In reality, they are tokenized human creativity, remixed and fed back to us.”
\par
“Our AI creations mimic us, because we’re they’re data.”},
file = {/Users/sen/Zotero/storage/4V8IZB89/what-we-talk-about-when-we-talk-about-ai-part-two.html}
}
@online{nostaLargeLanguageModels2024,
title = {Large {{Language Models}} and the {{Path}} to {{Our Higher Self}} | {{Psychology Today}}},
author = {Nosta, John},
date = {2024-11-18},
url = {https://www.psychologytoday.com/intl/blog/the-digital-self/202411/large-language-models-and-the-path-to-our-higher-self},
urldate = {2025-01-19},
abstract = {LLMs are transformative tools that enhance human creativity and thought, offering new ways to imagine, create, and connect with the world around us.},
langid = {english},
file = {/Users/sen/Zotero/storage/PXLCFL2S/large-language-models-and-the-path-to-our-higher-self.html}
}
@online{NotBestPath2025,
title = {‘{{Not}} on the {{Best Path}}’},
date = {2025-02-13},
url = {https://cacm.acm.org/opinion/not-on-the-best-path/},
urldate = {2025-02-15},
langid = {american},
organization = {Communications of the ACM}
}
@online{olechLlamacppGuideRunning2024,
title = {Llama.Cpp Guide - {{Running LLMs}} Locally, on Any Hardware, from Scratch},
author = {Olech, Wojciech},
date = {2024-12-25},
url = {https://steelph0enix.github.io/posts/llama-cpp-guide/},
urldate = {2025-02-02},
abstract = {Psst, kid, want some cheap and small LLMs?},
langid = {english}
}
@online{ongwesojrPhonyComfortsUseful2024,
type = {Substack newsletter},
title = {The Phony Comforts of Useful Idiots},
author = {Ongweso Jr, Edward},
date = {2024-12-08},
url = {https://thetechbubble.substack.com/p/the-phony-comforts-of-useful-idiots},
urldate = {2025-02-02},
abstract = {On Casey Newton and the shallowness of anti-skepticism.},
organization = {The Tech Bubble},
file = {/Users/sen/Zotero/storage/RUYFJ8X7/the-phony-comforts-of-useful-idiots.html}
}
@online{OpenAIDeepResearch,
title = {{{OpenAI Deep Research}} - {{Six Strange Failures}}},
url = {https://futuresearch.ai/oaidr-feb-2025},
urldate = {2025-02-20},
abstract = {Six case studies of OpenAI Deep Research going wrong on web research tasks humans can solve, and what we learned about when to use, and when not to use, OpenAI Deep Research for serious work.},
langid = {american},
organization = {FUTURESEARCH},
file = {/Users/sen/Zotero/storage/XK9SE9GH/oaidr-feb-2025.html}
}
@online{OpinionForumHow2025,
title = {Opinion {{Forum}} | {{How AI Is Changing Higher Education}}},
date = {2025-11-06T00:44:24},
url = {https://www.chronicle.com/article/how-ai-is-changing-higher-education},
urldate = {2025-11-20},
abstract = {The technology is reshaping every aspect of university life. Fifteen scholars weigh in on what happens next.},
langid = {english},
organization = {The Chronicle of Higher Education},
keywords = {genai},
note = {\href{https://archive.ph/lZZys}{https://archive.ph/lZZys}},
file = {/Users/sen/Zotero/storage/W4QUIEPC/how-ai-is-changing-higher-education.html}
}
@online{PeopleRefusingUse2025,
title = {The People Refusing to Use {{AI}}},
date = {2025-05-05},
url = {https://www.bbc.com/news/articles/c15q5qzdjqxo},
urldate = {2025-05-06},
abstract = {Worried about the environment and the loss of skills, some people are resisting the rise of AI.},
langid = {british},
file = {/Users/sen/Zotero/storage/545G27SL/c15q5qzdjqxo.html}
}
@inproceedings{rajiSavingFaceInvestigating2020,
title = {Saving {{Face}}: {{Investigating}} the {{Ethical Concerns}} of {{Facial Recognition Auditing}}},
shorttitle = {Saving {{Face}}},
booktitle = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}},
author = {Raji, Inioluwa Deborah and Gebru, Timnit and Mitchell, Margaret and Buolamwini, Joy and Lee, Joonseok and Denton, Emily},
date = {2020-02-07},
pages = {145--151},
publisher = {ACM},
location = {New York NY USA},
doi = {10.1145/3375627.3375820},
url = {https://dl.acm.org/doi/10.1145/3375627.3375820},
urldate = {2021-11-05},
abstract = {Although essential to revealing biased performance, well intentioned attempts at algorithmic auditing can have effects that may harm the very populations these measures are meant to protect. This concern is even more salient while auditing biometric systems such as facial recognition, where the data is sensitive and the technology is often used in ethically questionable manners. We demonstrate a set of five ethical concerns in the particular case of auditing commercial facial processing technology, highlighting additional design considerations and ethical tensions the auditor needs to be aware of so as not exacerbate or complement the harms propagated by the audited system. We go further to provide tangible illustrations of these concerns, and conclude by reflecting on what these concerns mean for the role of the algorithmic audit and the fundamental product limitations they reveal.},
eventtitle = {{{AIES}} '20: {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}},
isbn = {978-1-4503-7110-0},
langid = {english},
keywords = {facial recognition,photography,programming},
file = {/Users/sen/Zotero/storage/YGTKW3MD/Raji et al. - 2020 - Saving Face Investigating the Ethical Concerns of.pdf}
}
@online{ramelNewGitHubCopilot,
title = {New {{GitHub Copilot Research Finds}} '{{Downward Pressure}} on {{Code Quality}}'},
author = {Ramel, David},
date = {2024-01-25},
url = {https://visualstudiomagazine.com/Articles/2024/01/25/copilot-research.aspx},
urldate = {2024-09-27},
abstract = {'We find disconcerting trends for maintainability.'},
langid = {american},
organization = {Visual Studio Magazine},
file = {/Users/sen/Zotero/storage/WCHXEI4N/copilot-research.html}
}
@online{rheingoldAIIALarge2024,
title = {{{AI}} for {{IA}}: Large Language Models as Thinking Tools | {{Howard Rheingold}}},
shorttitle = {{{AI}} for {{IA}}},
author = {Rheingold, Howard},
date = {2024-12-22},
url = {https://www.patreon.com/posts/ai-for-ia-large-118120516},
urldate = {2025-01-19},
abstract = {Get more from Howard Rheingold on Patreon},
organization = {Patreon},
note = {\section{LLMs As Cultural Technologies}
\par
lack the intrinsic cognitive processes,intentions, or consciousness of a human agent, they are advanced statistical tools trained on vast datasets to generate responses based on patterns in human language.
\par
products of collective human knowledge and culture, reflect and amplify existing cultural practices. Not independently innovative or creative
\par
extend human cognition as augmenting enhancements to process, synthesize, and communicate information more efficiently to allow for collaborative exploration and problem-solving},
file = {/Users/sen/Zotero/storage/Y7F8CDYN/ai-for-ia-large-118120516.html}
}
@online{romeroImLosingAll2025,
title = {I’m {{Losing All Trust}} in the {{AI Industry}}},
author = {Romero, Alberto},
date = {2025-04-10},
url = {https://www.thealgorithmicbridge.com/p/im-losing-all-trust-in-the-ai-industry},
urldate = {2025-07-06},
abstract = {As a supporter, I would love not to feel this way},
langid = {english},
file = {/Users/sen/Zotero/storage/MH8JJTJ6/im-losing-all-trust-in-the-ai-industry.html}
}
@online{salvaggioChallengingMythsGenerative2024,
title = {Challenging {{The Myths}} of {{Generative AI}} | {{TechPolicy}}.{{Press}}},
author = {Salvaggio, Eryk},
date = {2024-08-29T11:24:55},
url = {https://techpolicy.press/challenging-the-myths-of-generative-ai},
urldate = {2024-09-02},
abstract = {Eryk Salvaggio says we must dispense with myths if we are to think more clearly about what AI actually is and does.},
langid = {english},
organization = {Tech Policy Press},
keywords = {GenAI,generativeAI},
note = {The premise is that statistically likely word pairings will produce a reliable reference to the information users seek.},
file = {/Users/sen/Zotero/storage/Z4PYBYQA/challenging-the-myths-of-generative-ai.html}
}
@online{samfiraThemYouShould2025,
type = {Mastodon post},
title = {Them: "You Should Use {{AI}}! {{The}} World Is Moving to Agents Soon. {{You}} Need to at Least Use Copilot or Something or You Will Be Left behind!"{{Me}}…},
shorttitle = {Them},
author = {Samfira, Gabriel Adrian (@gabriel@mastodon.samfira.com)},
date = {2025-04-13},
url = {https://mastodon.samfira.com/@gabriel/114331208324554833},
urldate = {2025-05-28},
abstract = {Them: "you should use AI! The world is moving to agents soon. You need to at least use copilot or something or you will be left behind!"Me: * proceeds to use copilot *Also me: * spends 2 hours trying to figure out where the code deadlocks *Me 2 hours later: * finds a *sync.Mutex.Lock() used twice in a function i just tabbed through that **looked** about right *},
langid = {english},
organization = {Mastodon},
file = {/Users/sen/Zotero/storage/3DK3LNVA/114331208324554833.html}
}
@online{schmalbachWTFAgenticAI2024,
title = {{{WTF}} Is {{Agentic AI}}?},
author = {Schmalbach, Vincent},
date = {2024-11-29T09:50:31+00:00},
url = {https://www.vincentschmalbach.com/wtf-is-agentic-ai/},
urldate = {2024-12-29},
abstract = {There’s been a lot of noise lately about "agentic AI," with everyone from startups to tech giants claiming it’s the next big thing. But what is it really? Here’s the thing: what people are calling…},
langid = {american},
organization = {Vincent Schmalbach},
file = {/Users/sen/Zotero/storage/ZFIU8ADG/wtf-is-agentic-ai.html}
}
@online{schwaber-cohenChunkingStrategiesLLM,
title = {Chunking {{Strategies}} for {{LLM Applications}} | {{Pinecone}}},
author = {Schwaber-Cohen, Roie},
date = {2025-06-28},
url = {https://www.pinecone.io/learn/chunking-strategies/},
urldate = {2024-10-24},
internal-note = {NOTE(review): date (2025-06-28) postdates urldate (2024-10-24) -- likely a page-update timestamp scraped after the original save; verify which is correct},
abstract = {In the context of building LLM-related applications, chunking is the process of breaking down large pieces of text into smaller segments. It’s an essential technique that helps optimize the relevance of the content we get back from a vector database once we use the LLM to embed content. In this blog post, we’ll explore if and how it helps improve efficiency and accuracy in LLM-related applications.},
langid = {english},
file = {/Users/sen/Zotero/storage/ETIF5PFE/chunking-strategies.html}
}
@online{SetTrapCatch2025,
title = {I {{Set A Trap To Catch My Students Cheating With AI}}. {{The Results Were Shocking}}.},
date = {2025-11-20T14:00:17Z},
url = {https://www.huffpost.com/entry/history-professor-ai-cheating-students_n_69178150e4b0781acfd62540},
urldate = {2025-11-21},
abstract = {"Students are not just undermining their ability to learn, but to someday lead."},
langid = {english},
organization = {HuffPost},
keywords = {GenAI}
}
@misc{shojaeeIllusionThinkingUnderstanding2025,
title = {The Illusion of Thinking: {{Understanding}} the Strengths and Limitations of Reasoning Models via the Lens of Problem Complexity},
author = {Shojaee, Parshin and Mirzadeh, Iman and Alizadeh, Keivan and Horton, Maxwell and Bengio, Samy and Farajtabar, Mehrdad},
date = {2025},
url = {https://ml-site.cdn-apple.com/papers/the-illusion-of-thinking.pdf},
file = {/Users/sen/Zotero/storage/R9P6NYB5/Shojaee† et al. - 2025 - The illusion of thinking Understanding the streng.pdf}
}
@article{shumailovAIModelsCollapse2024,
title = {{{AI}} Models Collapse When Trained on Recursively Generated Data},
author = {Shumailov, Ilia and Shumaylov, Zakhar and Zhao, Yiren and Papernot, Nicolas and Anderson, Ross and Gal, Yarin},
date = {2024-07},
journaltitle = {Nature},
volume = {631},
number = {8022},
pages = {755--759},
publisher = {Nature Publishing Group},
issn = {1476-4687},
doi = {10.1038/s41586-024-07566-y},
url = {https://www.nature.com/articles/s41586-024-07566-y},
urldate = {2025-11-20},
abstract = {Stable diffusion revolutionized image creation from descriptive text. GPT-2 (ref.\,1), GPT-3(.5) (ref.\,2) and GPT-4 (ref.\,3) demonstrated high performance across a variety of language tasks. ChatGPT introduced such language models to the public. It is now clear that generative artificial intelligence (AI) such as large language models (LLMs) is here to stay and will substantially change the ecosystem of online text and images. Here we consider what may happen to GPT-\{n\} once LLMs contribute much of the text found online. We find that indiscriminate use of model-generated content in training causes irreversible defects in the resulting models, in which tails of the original content distribution disappear. We refer to this effect as ‘model collapse’ and show that it can occur in LLMs as well as in variational autoencoders (VAEs) and Gaussian mixture models (GMMs). We build theoretical intuition behind the phenomenon and portray its ubiquity among all learned generative models. We demonstrate that it must be taken seriously if we are to sustain the benefits of training from large-scale data scraped from the web. Indeed, the value of data collected about genuine human interactions with systems will be increasingly valuable in the presence of LLM-generated content in data crawled from the Internet.},
langid = {english},
keywords = {Computational science,Computer science},
file = {/Users/sen/Zotero/storage/GY5BM99R/Shumailov et al. - 2024 - AI models collapse when trained on recursively generated data.pdf}
}
@online{somersThreepagePaperThat2019,
title = {The Three-Page Paper That Shook Philosophy: {{Gettiers}} in Software Engineering},
shorttitle = {The Three-Page Paper That Shook Philosophy},
author = {Somers, James},
date = {2019-01-13},
url = {https://jsomers.net/blog/gettiers},
urldate = {2022-07-09},
langid = {american},
organization = {jsomers.net},
keywords = {programming},
file = {/Users/sen/Zotero/storage/KC94P469/gettiers.html}
}
@online{spichakWhyAICan2024,
title = {Why {{AI Can Push You}} to {{Make}} the {{Wrong Decision}} at {{Work}}},
author = {Spichak, Simon},
date = {2024-09-03},
url = {https://www.brainfacts.org:443/neuroscience-in-society/tech-and-the-brain/2024/why-ai-can-push-you-to-make-the-wrong-decision-at-work-090324},
urldate = {2024-09-08},
abstract = {Automation bias is the tendency to be less vigilant when a process is automated. But can we effectively check ourselves against AI before making a wrong decision?},
langid = {english},
file = {/Users/sen/Zotero/storage/MAHJRGP9/why-ai-can-push-you-to-make-the-wrong-decision-at-work-090324.html}
}
@online{spinellisBlogDds202603022026,
title = {Blog Dds: 2026-03-02 — {{Vibe}} Coding toward the Incident Horizon},
author = {Spinellis, Diomidis},
date = {2026-03-02},
url = {https://www.spinellis.gr/blog/20260302/?utm=md},
urldate = {2026-03-02},
file = {/Users/sen/Zotero/storage/HW6ZCCDI/20260302.html}
}
@online{sturmLearningIsnCollecting2016,
title = {Learning {{Isn}}’t {{Collecting}}, {{It}}’s {{Connecting}}},
author = {Sturm, Mike},
date = {2016-11-17T20:47:14},
url = {https://medium.com/personal-growth/learning-isnt-collecting-it-s-connecting-e1189837aa23},
urldate = {2022-05-09},
abstract = {A Quick Note on How We Look at Lifelong Education},
langid = {english},
organization = {Personal Growth},
keywords = {cognitive processes},
file = {/Users/sen/Zotero/storage/NDNWRKFG/learning-isnt-collecting-it-s-connecting-e1189837aa23.html}
}
@online{tanteVibeCoding2025,
title = {On "{{Vibe Coding}}"},
author = {{tante}},
date = {2025-05-23T13:10:05+00:00},
url = {https://tante.cc/2025/05/23/on-vibe-coding/},
urldate = {2025-05-24},
abstract = {The hype about the potentials (it’s always future potential, never real current use) of AI has discarded its last cycle (“reasoning models”/”deep research”, both terms being factually untrue and deeply deceiving at best) and moved to a new double whammy of “agentic AI” and “Vibe Coding”. Now “agentic AI” basically just means that some LLM […]},
langid = {american},
organization = {Smashing Frames},
file = {/Users/sen/Zotero/storage/9A6GJ64Y/on-vibe-coding.html}
}
@article{tarnoffWeizenbaumsNightmaresHow2023,
entrysubtype = {newspaper},
title = {Weizenbaum’s Nightmares: How the Inventor of the First Chatbot Turned against {{AI}}},
shorttitle = {Weizenbaum’s Nightmares},
author = {Tarnoff, Ben},
date = {2023-07-25T04:00:29},
journaltitle = {The Guardian},
issn = {0261-3077},
url = {https://www.theguardian.com/technology/2023/jul/25/joseph-weizenbaum-inventor-eliza-chatbot-turned-against-artificial-intelligence-ai},
urldate = {2024-12-15},
abstract = {Computer scientist Joseph Weizenbaum was there at the dawn of artificial intelligence – but he was also adamant that we must never confuse computers with humans},
journalsubtitle = {Technology},
langid = {british},
keywords = {Artificial intelligence (AI),Chatbots,ChatGPT,generativeAI,Internet safety,MIT - Massachusetts Institute of Technology,programming,Society,Technology,Vietnam war},
file = {/Users/sen/Zotero/storage/9JEUN3MU/joseph-weizenbaum-inventor-eliza-chatbot-turned-against-artificial-intelligence-ai.html}
}
@online{taylorIfYouBelieve2025,
title = {If You Believe in “{{Artificial Intelligence}}”, Take Five Minutes to Ask It about Stuff You Know Well},
author = {Taylor, Mike},
date = {2025-02-14T23:25:35+00:00},
url = {https://svpow.com/2025/02/14/if-you-believe-in-artificial-intelligence-take-five-minutes-to-ask-it-about-stuff-you-know-well/},
urldate = {2025-08-12},
abstract = {Here’s a Mastodon thread from a year ago. Just a quick check on how ChatGPT’s getting on … Me: Who reassigned the species Brachiosaurus brancai to its own genus, and when? ChatGPT…},
langid = {english},
organization = {Sauropod Vertebra Picture of the Week},
file = {/Users/sen/Zotero/storage/6D9MC6RQ/if-you-believe-in-artificial-intelligence-take-five-minutes-to-ask-it-about-stuff-you-know-well.html}
}
@online{theoriqteamTheoriqAIAgent,
title = {Theoriq: {{The AI Agent Base Layer}}},
author = {{Theoriq Team}},
url = {https://www.theoriq.ai/litepaper},
urldate = {2025-01-25},
langid = {english},
file = {/Users/sen/Zotero/storage/EUIEF92I/litepaper.html}
}
@online{thomasMathsYouNeed2025,
title = {The Maths You Need to Start Understanding {{LLMs}}},
author = {Thomas, Giles},
date = {2025-09-02T23:30:00+00:00},
url = {https://www.gilesthomas.com/2025/09/maths-for-llms},
urldate = {2025-09-06},
abstract = {A quick refresher on the maths behind LLMs: vectors, matrices, projections, embeddings, logits and softmax.},
organization = {Giles' Blog},
keywords = {genai},
file = {/Users/sen/Zotero/storage/LCFK7EN7/maths-for-llms.html}
}
@article{varaOpinionOneYear2023,
entrysubtype = {newspaper},
title = {Opinion | {{One Year In}} and {{ChatGPT Already Has Us Doing Its Bidding}}},
author = {Vara, Vauhini},
date = {2023-12-19},
journaltitle = {The New York Times},
issn = {0362-4331},
url = {https://www.nytimes.com/2023/12/19/opinion/artificial-intelligence-chatgpt.html},
urldate = {2023-12-19},
abstract = {For me, the journey began when I asked ChatGPT who I was.},
journalsubtitle = {Opinion},
langid = {american},
keywords = {{Altman, Samuel H},ai,Artificial Intelligence,artificial intelligence (a.i.),ChatGPT,Computers and the Internet,GenAI,llm,OpenAI Labs,programming,Writing and Writers},
file = {/Users/sen/Zotero/storage/FHEK3PJJ/Vara - 2023 - Opinion One Year In and ChatGPT Already Has Us D.pdf;/Users/sen/Zotero/storage/JSJCJSQN/artificial-intelligence-chatgpt.html}
}
@online{VectorDatabasesAre2024,
title = {Vector {{Databases Are}} the {{Wrong Abstraction}}},
date = {2024-10-29T13:10:00},
url = {https://www.timescale.com/blog/vector-databases-are-the-wrong-abstraction/},
urldate = {2024-10-30},
abstract = {Today’s vector databases disconnect embeddings from their source data. We should treat embeddings more like database indexes—here’s how.},
langid = {english},
organization = {Timescale Blog},
file = {/Users/sen/Zotero/storage/CHKSMKAR/vector-databases-are-the-wrong-abstraction.html}
}
@online{VibeCodingVs2025,
title = {"{{Vibe Coding}}" vs {{Reality}}},
date = {2025-03-19},
url = {https://cendyne.dev/posts/2025-03-19-vibe-coding-vs-reality.html},
urldate = {2025-03-23},
abstract = {Reviewing the capabilities and limitations of LLM agents in software development and their impact on skilled and less skilled developers.},
langid = {english},
file = {/Users/sen/Zotero/storage/PIZG7W68/2025-03-19-vibe-coding-vs-reality.html}
}
@online{viciousDrunkenPlagiaristsACM2025,
title = {The {{Drunken Plagiarists}} - {{ACM Queue}}},
author = {Vicious, Kode and Neville-Neil, George V.},
date = {2025-01-23},
url = {https://queue.acm.org/detail.cfm?ref=rss&id=3711675},
urldate = {2025-02-17},
file = {/Users/sen/Zotero/storage/YLV9IA7E/detail.html}
}
@book{weizenbaumComputerPowerHuman1976,
title = {Computer Power and Human Reason: From Judgment to Calculation},
author = {Weizenbaum, Joseph},
date = {1976},
publisher = {W.H. Freeman},
location = {San Francisco, Cal.},
isbn = {0-7167-0464-1, 978-0-7167-0464-5, 0-7167-0463-3, 978-0-7167-0463-8},
langid = {english}
}
@article{weizenbaumComputerYourFuture1983,
entrysubtype = {magazine},
title = {The {{Computer}} in {{Your Future}}},
author = {Weizenbaum, Joseph},
namea = {Feigenbaum, Edward A. and McCorduck, Pamela},
nameatype = {collaborator},
date = {1983-10-27},
journaltitle = {The New York Review of Books},
volume = {30},
number = {16},
issn = {0028-7504},
url = {https://www.nybooks.com/articles/1983/10/27/the-computer-in-your-future/},
urldate = {2025-01-25},
abstract = {A little quiz: who spoke the following lines, and on what occasion? …no plausible claim to intellectuality can possibly be made in the near future without},
langid = {english},
note = {\href{https://archive.ph/mxUfm}{https://archive.ph/mxUfm}},
file = {/Users/sen/Zotero/storage/UHB2JE5I/Weizenbaum - 1983 - The Computer in Your Future.pdf;/Users/sen/Zotero/storage/2MG6GTC6/the-computer-in-your-future.html}
}
@online{WereSorryWe,
title = {We're Sorry We Created the {{Torment Nexus}}},
author = {Stross, Charles},
date = {2023-11-10},
url = {http://www.antipope.org/charlie/blog-static/2023/11/dont-create-the-torment-nexus.html},
urldate = {2023-11-15},
organization = {Charlie's Diary},
keywords = {distractories,TESCREAL},
file = {/Users/sen/Zotero/storage/8YI5AK5Q/dont-create-the-torment-nexus.html}
}
@misc{white2023promptpatterncatalogenhance,
title = {A Prompt Pattern Catalog to Enhance Prompt Engineering with {{ChatGPT}}},
author = {White, Jules and Fu, Quchen and Hays, Sam and Sandborn, Michael and Olea, Carlos and Gilbert, Henry and Elnashar, Ashraf and Spencer-Smith, Jesse and Schmidt, Douglas C.},
date = {2023},
eprint = {2302.11382},
eprinttype = {arXiv},
eprintclass = {cs.SE},
url = {https://arxiv.org/abs/2302.11382}
}
@online{WhyVibeCoding2025,
title = {Why {{Vibe Coding Is Overrated}}},
date = {2025-03-23},
url = {https://www.lycee.ai/blog/why-vibe-coding-is-overrated},
urldate = {2025-03-24},
abstract = {Vibes Can Only Take You So Far},
langid = {english},
file = {/Users/sen/Zotero/storage/HLL3QCHC/why-vibe-coding-is-overrated.html}
}
@online{willisonCatchingWeirdWorld2023,
title = {Catching up on the Weird World of {{LLMs}}},
author = {Willison, Simon},
date = {2023-08-03},
url = {https://simonwillison.net/2023/Aug/3/weird-world-of-llms/},
urldate = {2024-10-24},
abstract = {I gave a talk on Sunday at North Bay Python where I attempted to summarize the last few years of development in the space of LLMs—Large Language Models, the technology …},
langid = {british},
file = {/Users/sen/Zotero/storage/VGF4ZB7H/weird-world-of-llms.html}
}
@online{wolframWhatChatGPTDoing2023,
title = {What {{Is ChatGPT Doing}} … and {{Why Does It Work}}?},
author = {Wolfram, Stephen},
date = {2023-02-14},
url = {https://writings.stephenwolfram.com/2023/02/what-is-chatgpt-doing-and-why-does-it-work/},
urldate = {2023-08-05},
abstract = {Stephen Wolfram explores the broader picture of what's going on inside ChatGPT and why it produces meaningful text. Discusses models, training neural nets, embeddings, tokens, transformers, language syntax.},
langid = {english},
keywords = {artificial intelligence (a.i.),GenAI,programming},
file = {/Users/sen/Zotero/storage/QJQUQJXN/what-is-chatgpt-doing-and-why-does-it-work.html}
}
@online{wrightGitHubCopilotReplicating2024,
title = {{{GitHub Copilot}} Replicating Vulnerabilities, Insecure Code | {{TechTarget}}},
author = {Wright, Rob},
date = {2024-02-23},
url = {https://archive.is/hdp38},
urldate = {2024-09-27},
organization = {archive.is},
note = {Original URL \href{https://www.techtarget.com/searchsecurity/news/366571117/GitHub-Copilot-replicating-vulnerabilities-insecure-code?ref=wheresyoured.at\#:~:text=Research\%20from\%20Snyk\%20shows\%20that,issues\%20in\%20a\%20user's\%20codebase.}{https://www.techtarget.com/searchsecurity/news/366571117/GitHub-Copilot-replicating-vulnerabilities-insecure-code}},
file = {/Users/sen/Zotero/storage/TJAAU6MF/hdp38.html}
}
@article{WritingThinking2025,
title = {Writing Is Thinking},
date = {2025-06},
journaltitle = {Nature Reviews Bioengineering},
shortjournal = {Nat Rev Bioeng},
volume = {3},
number = {6},
pages = {431--431},
publisher = {Nature Publishing Group},
issn = {2731-6092},
doi = {10.1038/s44222-025-00323-4},
url = {https://www.nature.com/articles/s44222-025-00323-4},
urldate = {2025-07-24},
abstract = {On the value of human-generated scientific writing in the age of large-language models.},
langid = {english},
keywords = {Authorship,Communication},
file = {/Users/sen/Zotero/storage/VCPAPWM8/2025 - Writing is thinking.pdf}
}
@misc{yiu2023imitationversusinnovationchildren,
title = {Imitation versus {{Innovation}}: {{What}} Children Can Do That Large Language and Language-and-Vision Models Cannot (Yet)?},
author = {Yiu, Eunice and Kosoy, Eliza and Gopnik, Alison},
date = {2023},
eprint = {2305.07666},
eprinttype = {arXiv},
eprintclass = {cs.AI},
url = {https://arxiv.org/abs/2305.07666}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment