mirror of
https://github.com/IRS-Public/direct-file.git
synced 2025-06-27 20:25:52 +00:00
Merge 25d0baefa0
into e0d5c84451
This commit is contained in:
commit
c726980533
39 changed files with 174 additions and 170 deletions
|
@ -427,6 +427,7 @@ cd direct-file/<project>
|
|||
```
|
||||
|
||||
To run a test individually, run `./mvnw -Dtest=<Name of Test> test` with the test name. For example:
|
||||
|
||||
```sh
|
||||
./mvnw -Dtest=TaxReturnServiceTest test
|
||||
```
|
||||
|
@ -446,6 +447,5 @@ To run code coverage in any particular app:
|
|||
```sh
|
||||
./mvnw jacoco:report
|
||||
```
|
||||
|
||||
To view the generated report, go to `<app_name>/target/site/jacoco/index.html` and open it in a browser.
|
||||
|
||||
|
||||
|
|
|
@ -724,7 +724,7 @@
|
|||
</Fact>
|
||||
<Fact path="/formW2s/*/hasSeenLastAvailableScreen">
|
||||
<Name>Has Seen last available screen</Name>
|
||||
<Description>Whether the user has seen the last available scren for this W2.</Description>
|
||||
<Description>Whether the user has seen the last available screen for this W2.</Description>
|
||||
<Export downstreamFacts="true" />
|
||||
|
||||
<Writable>
|
||||
|
@ -1440,7 +1440,7 @@
|
|||
</Fact>
|
||||
|
||||
<Fact path="/maxMedicareWagesForAnyFilerOnReturn">
|
||||
<Description>The highest medicare wages a single person on the return recieved</Description>
|
||||
<Description>The highest medicare wages a single person on the return received</Description>
|
||||
<Derived>
|
||||
<GreaterOf>
|
||||
<Dependency path="/primaryFilerMedicareWages" />
|
||||
|
@ -5518,7 +5518,7 @@
|
|||
|
||||
<Fact path="/flowKnockoutBox15IncomeFromDifferentState">
|
||||
<Name>Flow Knockout Income from Unsupported State</Name>
|
||||
<Description>Used to control a knockout for if the filer recieved income from an unsupported
|
||||
<Description>Used to control a knockout for if the filer received income from an unsupported
|
||||
state.</Description>
|
||||
<Export downstreamFacts="true" />
|
||||
|
||||
|
@ -5537,7 +5537,7 @@
|
|||
</Fact>
|
||||
|
||||
<Fact path="/formW2s/*/hasKnockoutBox15IncomeFromDifferentState">
|
||||
<Name>Has Knockout recieved Income from a Different State</Name>
|
||||
<Name>Has Knockout received Income from a Different State</Name>
|
||||
<Description>Checks to see if their income is from a different state.</Description>
|
||||
|
||||
<Derived>
|
||||
|
|
|
@ -738,14 +738,14 @@
|
|||
</Fact>
|
||||
|
||||
<Fact path="/writableHasAdvancedPtc">
|
||||
<Description>(Writable) Whether the TP has recieved advaned payments of the PTC</Description>
|
||||
<Description>(Writable) Whether the TP has received advanced payments of the PTC</Description>
|
||||
<Writable>
|
||||
<Boolean />
|
||||
</Writable>
|
||||
</Fact>
|
||||
|
||||
<Fact path="/hasAdvancedPtc">
|
||||
<Description>Whether the TP has recieved advaned payments of the PTC</Description>
|
||||
<Description>Whether the TP has received advanced payments of the PTC</Description>
|
||||
<Derived>
|
||||
<Switch>
|
||||
<Case>
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
- [Variables](#variables)
|
||||
- [Functions](#functions)
|
||||
- [How to set and save facts via the variables](#how-to-set-and-save-facts-via-the-variables)
|
||||
- [Reset the state of facts locally:](#reset-the-state-of-facts-locally)
|
||||
- [Reset the state of facts locally](#reset-the-state-of-facts-locally)
|
||||
- [Testing](#testing)
|
||||
- [Running fact dictionary tests](#running-fact-dictionary-tests)
|
||||
- [Running a prod build locally](#running-a-prod-build-locally)
|
||||
|
@ -45,7 +45,7 @@
|
|||
in order to avoid long waits due to network timeouts during fetches. See the [status README](../status/README.md) to get started.
|
||||
4. Run `npm install` in df-client directory, to install dependencies
|
||||
5. Run `npm run start` in df-client directory, to build the application in development mode
|
||||
6. (Optional) If you want to use the psuedo locale instead of Spanish, you can set `VITE_USE_PSEUDO_LOCALE=true` when starting the server.
|
||||
6. (Optional) If you want to use the pseudo locale instead of Spanish, you can set `VITE_USE_PSEUDO_LOCALE=true` when starting the server.
|
||||
|
||||
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
|
||||
|
||||
|
@ -121,7 +121,7 @@ In order to access the variables and functions mentioned above, one needs to hav
|
|||
5. Run step 3 to see if it's been overwritten.
|
||||
6. Load the `about-you-intro` screen again to see the new name appear in the UI.
|
||||
|
||||
### Reset the state of facts locally:
|
||||
### Reset the state of facts locally
|
||||
|
||||
1. Run `debugFactGraph.download()` to save the fact graph into a local text file.
|
||||
2. Remove any existing facts you want.
|
||||
|
|
|
@ -3,7 +3,7 @@ import { Alert } from '@trussworks/react-uswds';
|
|||
import {
|
||||
isBeforeStdDeadline,
|
||||
isPostStateFilingDeadline,
|
||||
isPostDeadlineButBeforeMassachussetsDeadline,
|
||||
isPostDeadlineButBeforeMassachussettsDeadline,
|
||||
isBeforeResubmissionDeadline,
|
||||
} from '../../utils/dateUtils.js';
|
||||
import { useContext, useMemo } from 'react';
|
||||
|
@ -76,7 +76,7 @@ export const getBannerI18nKeys = (
|
|||
} else if (isBeforeStdDeadline(now)) {
|
||||
headingI18nKey = `banner.endOfFilingSeason.headingBeforeStdDeadline`;
|
||||
bodyI18nKey = `banner.endOfFilingSeason.contentBase`;
|
||||
} else if (isPostDeadlineButBeforeMassachussetsDeadline(now)) {
|
||||
} else if (isPostDeadlineButBeforeMassachussettsDeadline(now)) {
|
||||
headingI18nKey = `banner.endOfFilingSeason.headingBeforeMaDeadline`;
|
||||
bodyI18nKey = `banner.endOfFilingSeason.contentBase`;
|
||||
} else if (isPostStateFilingDeadline(now)) {
|
||||
|
|
|
@ -213,7 +213,7 @@ export const SubmitSubcategory = (
|
|||
i18nKey='/info/complete/sign-and-submit/tax-owed-pay-later-after-tax-day'
|
||||
conditions={[`/owesBalance`, { operator: `isFalse`, condition: `/payViaAch` }, `/isAfterTaxDay`]}
|
||||
/>
|
||||
{/* // Todo: SOT seems to imply this will route to the dashboard - is that really synonomous with Exit? */}
|
||||
{/* // Todo: SOT seems to imply this will route to the dashboard - is that really synonymous with Exit? */}
|
||||
{/* <ExitButton condition={{ operator: `isFalse`, condition: `/hasStateFilingIntegration` }} /> */}
|
||||
</Screen>
|
||||
</Subcategory>
|
||||
|
|
|
@ -575,7 +575,7 @@ export const AboutYouSubcategory = (
|
|||
>
|
||||
<Heading i18nKey='/heading/you-and-your-family/about-you/will-be-claimed' batches={[`updates-0`]} />
|
||||
<InfoDisplay i18nKey='/info/you-and-your-family/about-you/will-be-claimed' batches={[`updates-0`]} />
|
||||
{/* This question is overriden by /taxpayerCannotBeClaimed, so we only let the taxpayer answer it if /taxpayerCannotBeClaimed is false */}
|
||||
{/* This question is overridden by /taxpayerCannotBeClaimed, so we only let the taxpayer answer it if /taxpayerCannotBeClaimed is false */}
|
||||
<Boolean
|
||||
path='/filers/*/willBeClaimed'
|
||||
condition={{ operator: `isFalse`, condition: `/taxpayerCannotBeClaimed` }}
|
||||
|
|
|
@ -267,7 +267,7 @@ describe(`FlowConfig`, () => {
|
|||
}
|
||||
});
|
||||
|
||||
it<LocalTestContext>(`collection context can come from subcategory or be overriden by a CollectionLoop`, ({
|
||||
it<LocalTestContext>(`collection context can come from subcategory or be overridden by a CollectionLoop`, ({
|
||||
flowConfig,
|
||||
}) => {
|
||||
for (const screen of flowConfig.screens) {
|
||||
|
|
|
@ -316,7 +316,7 @@ describe(`GET /taxreturns`, () => {
|
|||
throw new ReadError(`Response was not successful`, 501);
|
||||
});
|
||||
|
||||
// Because we already succesfully rendered once in a previous spec, we won't automatically refresh
|
||||
// Because we already successfully rendered once in a previous spec, we won't automatically refresh
|
||||
// returns.
|
||||
store.dispatch(fetchTaxReturns());
|
||||
await act(async () => {
|
||||
|
@ -404,7 +404,7 @@ describe(`save()`, () => {
|
|||
}
|
||||
});
|
||||
|
||||
test(`error responses with an apiErrorKey are returned and considered translateable`, async () => {
|
||||
test(`error responses with an apiErrorKey are returned and considered translatable`, async () => {
|
||||
fakeFetch.mockResolvedValue(
|
||||
createFetchResponse({
|
||||
status: 404,
|
||||
|
|
|
@ -91,7 +91,7 @@ export const useSubmit = () => {
|
|||
};
|
||||
if (e.apiErrorKey === `signing.retriableEsignatureError`) {
|
||||
config.internalLink = `/flow/complete/sign-and-submit/sign-return-intro`;
|
||||
// Remeber that the user has failed electronic signing at least once. The user can still retry but
|
||||
// Remember that the user has failed electronic signing at least once. The user can still retry but
|
||||
// if they proceed through the legacy signing flow we'll prefer the legacy /submit endpoint next time.
|
||||
dispatch(setElectronicSignatureFailure());
|
||||
}
|
||||
|
|
|
@ -75,7 +75,7 @@ describe(`Relationship tests`, () => {
|
|||
relationship: `otherDescendantOfSibling`,
|
||||
relationshipOptionName: `otherDescendantOfSibling`,
|
||||
qc: true,
|
||||
// this is correct -- pub 501 specifies only a sibiling's son or daughter. Nothing further.
|
||||
// this is correct -- pub 501 specifies only a sibling's son or daughter. Nothing further.
|
||||
qrWithoutMemberOfHousehold: false,
|
||||
marriedHohQp: false,
|
||||
xmlName: `NEPHEW`,
|
||||
|
@ -103,7 +103,7 @@ describe(`Relationship tests`, () => {
|
|||
relationship: `otherDescendantOfHalfSibling`,
|
||||
relationshipOptionName: `otherDescendantOfHalfSibling`,
|
||||
qc: true,
|
||||
qrWithoutMemberOfHousehold: false, // this is correct -- pub 501 specifies only a half-sibiling's son or daughter.
|
||||
qrWithoutMemberOfHousehold: false, // this is correct -- pub 501 specifies only a half-sibling's son or daughter.
|
||||
marriedHohQp: false,
|
||||
xmlName: `NEPHEW`,
|
||||
},
|
||||
|
|
|
@ -317,7 +317,7 @@ describe(`Dependent age`, () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe(`Qualifing dependent TIN verification`, () => {
|
||||
describe(`Qualifying dependent TIN verification`, () => {
|
||||
it(`A dependent has an SSN`, ({ task }) => {
|
||||
task.meta.testedFactPaths = [`/familyAndHousehold/*/hasTin`];
|
||||
const { factGraph } = setupFactGraphDeprecated({
|
||||
|
|
|
@ -631,7 +631,7 @@ describe(`EITC eligibility`, () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe(`Must be ctizen or resident alien all year`, () => {
|
||||
describe(`Must be citizen or resident alien all year`, () => {
|
||||
describe(`Single returns`, () => {
|
||||
const baseCase = {
|
||||
...filerWithChild,
|
||||
|
|
|
@ -17,8 +17,8 @@ describe(`mefBusinessNameLine1Type`, () => {
|
|||
{ _testName: `not allowed`, input: `.^$/@!£§ÁÉÍÑÓ×ÚÜáéíñóúü[]`, expected: `` },
|
||||
{
|
||||
_testName: `special characters`,
|
||||
input: ` San-Diego's #1 Photograpy & Monkey Biz (.*~;/%") `,
|
||||
expected: `San-Diego's #1 Photograpy & Monkey Biz (*)`,
|
||||
input: ` San-Diego's #1 Photography & Monkey Biz (.*~;/%") `,
|
||||
expected: `San-Diego's #1 Photography & Monkey Biz (*)`,
|
||||
},
|
||||
];
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ const twoFilers = {
|
|||
},
|
||||
};
|
||||
|
||||
describe(`MFJ depednents`, () => {
|
||||
describe(`MFJ dependents`, () => {
|
||||
it(`MFJ dependent because of spouse`, ({ task }) => {
|
||||
task.meta.testedFactPaths = [`/isMFJDependent`];
|
||||
|
||||
|
|
|
@ -108,7 +108,7 @@ describe(`Taxable social security benefits`, () => {
|
|||
});
|
||||
expect(factGraph.get(Path.concretePath(`/taxableSocialSecurityBenefits`, null)).get.toString()).toBe(`0.00`);
|
||||
});
|
||||
it(`Starts getting taxed when there is other income in addition to social security income above a threshhold`, ({
|
||||
it(`Starts getting taxed when there is other income in addition to social security income above a threshold`, ({
|
||||
task,
|
||||
}) => {
|
||||
task.meta.testedFactPaths = [`/taxableSocialSecurityBenefits`];
|
||||
|
@ -197,7 +197,7 @@ describe(`Taxable social security benefits`, () => {
|
|||
});
|
||||
expect(factGraph.get(Path.concretePath(`/taxableSocialSecurityBenefits`, null)).get.toString()).toBe(`0.00`);
|
||||
});
|
||||
it(`Starts getting taxed when there is other income in addition to social security income above a threshhold`, ({
|
||||
it(`Starts getting taxed when there is other income in addition to social security income above a threshold`, ({
|
||||
task,
|
||||
}) => {
|
||||
task.meta.testedFactPaths = [`/taxableSocialSecurityBenefits`];
|
||||
|
|
|
@ -61,7 +61,7 @@ export const isBeforeStdDeadline = (now: Date) => {
|
|||
return currentTime < DAY_WHEN_UNABLE_TO_FILE_FEDERAL.getTime();
|
||||
};
|
||||
|
||||
export const isPostDeadlineButBeforeMassachussetsDeadline = (now: Date) => {
|
||||
export const isPostDeadlineButBeforeMassachussettsDeadline = (now: Date) => {
|
||||
const currentTime = now.getTime();
|
||||
return currentTime >= DAY_WHEN_UNABLE_TO_FILE_FEDERAL.getTime() && currentTime < DAY_WHEN_UNABLE_TO_FILE_MA.getTime();
|
||||
};
|
||||
|
|
|
@ -27,7 +27,7 @@ const Done = () => {
|
|||
</Helmet>
|
||||
<SubHeader />
|
||||
<Breadcrumbs href={PREV} />
|
||||
{/* We are currrently commenting out this ternary operater as we are temporarily removing the
|
||||
{/* We are currently commenting out this ternary operator as we are temporarily removing the
|
||||
"closing soon" state until designers and product have a chance to review this copy */}
|
||||
{/* {phase?.showOpenDoneSection ? (
|
||||
<>
|
||||
|
|
|
@ -8,7 +8,7 @@ import scala.util.{Try, Success, Failure}
|
|||
import gov.irs.factgraph.monads.JSEither
|
||||
import gov.irs.factgraph.validation.{ValidationFailure, ValidationFailureReason}
|
||||
|
||||
// NOTE: These classes and types are specified just to simplify some front-end logstics
|
||||
// NOTE: These classes and types are specified just to simplify some front-end logistics
|
||||
// This pattern may need some reconsideration to minimize boilerplate
|
||||
|
||||
@JSExportAll
|
||||
|
|
|
@ -81,7 +81,7 @@ object StringFactory:
|
|||
StringFailureReason.InvalidForm1099rBox11Year,
|
||||
),
|
||||
MefRatioTypeAsPercentPattern -> (
|
||||
"Invalid characters for pecentage field",
|
||||
"Invalid characters for percentage field",
|
||||
StringFailureReason.InvalidMefRatioType,
|
||||
),
|
||||
)
|
||||
|
|
|
@ -11,7 +11,7 @@ import gov.irs.factgraph.types.Dollar
|
|||
//
|
||||
// [1]: https://www.irs.gov/pub/irs-pdf/f1040.pdf
|
||||
//
|
||||
// There is a relationship betwen lines 12a, 12b, and 12c. In order to know what
|
||||
// There is a relationship between lines 12a, 12b, and 12c. In order to know what
|
||||
// should be entered in 12c, we need to first know the values of 12a and 12b.
|
||||
//
|
||||
// This pattern might remind you of a spreadsheet. If we were to transcribe Form
|
||||
|
|
|
@ -90,7 +90,7 @@ graph.get("/factC")
|
|||
// refund owed or balance due. There will be a set of tax credits for which we
|
||||
// have not yet determined the taxpayer's eligibility. In this case, it would be
|
||||
// far better to assume the taxpayer is ineligible until we can conclusively
|
||||
// prove their eligibility, rather than presuming eligilibity and taking credits
|
||||
// prove their eligibility, rather than presuming eligibility and taking credits
|
||||
// away from the taxpayer one by one.
|
||||
//
|
||||
// Conversely, we might imagine a Fact that represents whether the taxpayer's
|
||||
|
@ -141,7 +141,7 @@ graph.save()
|
|||
|
||||
graph.get("/conclusion")
|
||||
|
||||
// Unsuprisingly, the value is false. But more significantly, despite fact C
|
||||
// Unsurprisingly, the value is false. But more significantly, despite fact C
|
||||
// still being missing, the result is now complete. This makes sense if we think
|
||||
// about whether the value of fact C could affect the result. Now that fact B is
|
||||
// false, the value of fact C no longer matters; no matter what, the children of
|
||||
|
|
|
@ -325,7 +325,7 @@ val graph = Graph(
|
|||
graph.get("/tax")
|
||||
|
||||
// Note that when the arguments to an operation serve different roles, the
|
||||
// operations require us to explicilty label them. So while Add and Multiply can
|
||||
// operations require us to explicitly label them. So while Add and Multiply can
|
||||
// take arguments in any order, Subtract and Divide use Minuend/Subtrahends and
|
||||
// Dividend/Divisors to avoid ambiguity. Similarly, comparison operations like
|
||||
// LessThanOrEqual explicitly specify Left and Right.
|
||||
|
|
|
@ -9,7 +9,7 @@ import java.util.UUID
|
|||
//
|
||||
// In the last chapter, we described Collections and CollectionItems as Facts
|
||||
// with special properties. In this chapter, we'll explore some more of those
|
||||
// special proprties, and also look at how we can operate on Collections and
|
||||
// special properties, and also look at how we can operate on Collections and
|
||||
// CollectionItems same as we would any other Fact.
|
||||
//
|
||||
// As we often do, let's start by building a FactDictionary with some examples.
|
||||
|
|
|
@ -143,7 +143,7 @@ class TinNodeSpec extends AnyFunSpec:
|
|||
assert(fact.get(0) == Result.Complete(Tin("999-99-0000")))
|
||||
}
|
||||
it("accepts an argument allowing TINs that are all 0's") {
|
||||
// On W-2s, if a filer hasn't recieved a TIN by the time the W-2 is
|
||||
// On W-2s, if a filer hasn't received a TIN by the time the W-2 is
|
||||
// printed, the W-2 might have an SSN of 000-00-0000, so we have a flag
|
||||
// to allow that
|
||||
val dictionary = FactDictionary()
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## About MeF Status
|
||||
The MeF Status application's job is to:
|
||||
- Regularly poll MeF to learn about the status of each submitted tax return, until we recieve a final status. A final status is one that will not change: Accepted, Rejected, or an error that will not get resolved without intervention.
|
||||
- Regularly poll MeF to learn about the status of each submitted tax return, until we receive a final status. A final status is one that will not change: Accepted, Rejected, or an error that will not get resolved without intervention.
|
||||
- Save the final status for each tax return where it can be accessed by the backend application
|
||||
|
||||
### How it works
|
||||
|
@ -22,7 +22,6 @@ The MeF Status application's job is to:
|
|||
build-project.sh script from the
|
||||
`submit` folder of the direct_file project.
|
||||
|
||||
|
||||
### Proxy
|
||||
If you use a proxy, first see `MAVEN_OPTS` in [the project readme](../README.md#important-configuration-variables) and [the OMB Connect readme](../README-omb-connect.md) for information about ensuring your proxy settings are passed to all build steps.
|
||||
|
||||
|
@ -49,12 +48,11 @@ The app probably failed for you due to lack of configuration. You can look at t
|
|||
docker compose logs mef-status
|
||||
```
|
||||
|
||||
|
||||
## Running locally
|
||||
|
||||
### Environment
|
||||
|
||||
Set the following environment variables in your local environment which will facilitate running both the applications as well as the docker containers. On macbooks, placing the export statements below in the `.zshrc` (gets run and evaluated everytime a shell instance is started) or `.zprofile` (gets run and evaluated when a user logs in) file will accomplish this. If using the bash shell, placing them in `.bashrc` should do (and effectively behave similar to `.zshrc`).
|
||||
Set the following environment variables in your local environment which will facilitate running both the applications as well as the docker containers. On MacBooks, placing the export statements below in the `.zshrc` (gets run and evaluated every time a shell instance is started) or `.zprofile` (gets run and evaluated when a user logs in) file will accomplish this. If using the bash shell, placing them in `.bashrc` should do (and effectively behave similar to `.zshrc`).
|
||||
|
||||
```
|
||||
# Get the keystore alias from a fellow developer and replace the value in between quotes with the actual value
|
||||
|
@ -77,10 +75,11 @@ export STATUS_ETIN="[status-etin]"
|
|||
```
|
||||
|
||||
You'll also need to set up the `LOCAL_WRAPPING_KEY` following the instructions in the [backend README](../backend/README.md#initial-setup)
|
||||
|
||||
```
|
||||
export LOCAL_WRAPPING_KEY="[local-wrapping-key]"
|
||||
```
|
||||
|
||||
### Static Analysis: Spot Bugs and PMD
|
||||
For notes and usage on spotbugs see the [Backend API README Spot Bugs section](../submit/README.md#static-analysis)
|
||||
|
||||
For notes and usage on spotbugs see the [Backend API README Spot Bugs section](../submit/README.md#static-analysis)
|
||||
|
|
|
@ -43,7 +43,7 @@ status:
|
|||
root-translation-key: status
|
||||
translation-key-splitter: .
|
||||
ack-poll-in-milliseconds: 60000
|
||||
# this field controls whether a Pending Response is returned from the /status endpoint when no submisionId is found.
|
||||
# this field controls whether a Pending Response is returned from the /status endpoint when no submissionId is found.
|
||||
# we will want to disable this in the future (likely when SQS communication is enabled).
|
||||
status-endpoint-returns-pending-by-default-enabled: true
|
||||
status-polling-enabled: true
|
||||
|
|
|
@ -67,7 +67,7 @@ docker compose logs mef-submit
|
|||
|
||||
### Environment
|
||||
|
||||
Set the following environment variables in your local environment which will facilitate running both the applications as well as the docker containers. On macbooks, placing the export statements below in the `.zshrc` (gets run and evaluated everytime a shell instance is started) or `.zprofile` (gets run and evaluated when a user logs in) file will accomplish this. If using the bash shell, placing them in `.bashrc` should do (and effectively behave similar to `.zshrc`).
|
||||
Set the following environment variables in your local environment which will facilitate running both the applications as well as the docker containers. On MacBooks, placing the export statements below in the `.zshrc` (gets run and evaluated every time a shell instance is started) or `.zprofile` (gets run and evaluated when a user logs in) file will accomplish this. If using the bash shell, placing them in `.bashrc` should do (and effectively behave similar to `.zshrc`).
|
||||
|
||||
```sh
|
||||
# Get the base64 encoded keystore from a fellow developer and replace the value in between quotes with the actual value
|
||||
|
@ -93,6 +93,7 @@ export SUBMIT_ID_VAR_CHARS="wa"
|
|||
```
|
||||
|
||||
You'll also need to set up the `LOCAL_WRAPPING_KEY` following the instructions in the [backend README](../backend/README.md#initial-setup)
|
||||
|
||||
```
|
||||
export LOCAL_WRAPPING_KEY="[local-wrapping-key]"
|
||||
```
|
||||
|
@ -103,11 +104,12 @@ Maven command for running the application locally:
|
|||
./mvnw spring-boot:run -Dspring-boot.run.profiles=development
|
||||
```
|
||||
|
||||
### Developer Notes:
|
||||
### Developer Notes
|
||||
|
||||
Files in the `direct-file/submit/src/main/java/gov/irs/directfile/submit/gen` directory are generated by the `direct-file/utils/mef-code-generator` application.
|
||||
|
||||
|
||||
### Static Analysis
|
||||
|
||||
We use [SpotBugs](https://spotbugs.readthedocs.io/en/stable/bugDescriptions.html) and [PMD](https://pmd.github.io/pmd/index.html) for static code analysis in this app. The app is configured to have pre-commit hooks
|
||||
run SpotBugs and PMD.
|
||||
|
||||
|
@ -122,16 +124,15 @@ To see a formatted html page for the static analysis reports you can run:
|
|||
```bash
|
||||
./mvnw compile site:run
|
||||
```
|
||||
This will start a site at `localhost:9898`. Navigate to `Project Reports` and then click on PMD or Spotbugs to view errors in the app.
|
||||
|
||||
This will start a site at `localhost:9898`. Navigate to `Project Reports` and then click on PMD or Spotbugs to view errors in the app.
|
||||
|
||||
If you want to ignore the pre-commit hook that runs spotbugs do:
|
||||
|
||||
`git commit --no-verify`
|
||||
|
||||
|
||||
|
||||
To generate each report, you can run:
|
||||
|
||||
```bash
|
||||
./mvnw compile spotbugs:spotbugs
|
||||
```
|
||||
|
@ -139,9 +140,11 @@ To generate each report, you can run:
|
|||
```bash
|
||||
./mvnw pmd:pmd
|
||||
```
|
||||
|
||||
The xml file this generates isn't very readable, so use `./mvnw spotbugs:gui` for an interactive guild or run `./mvnw site` to see an html report of the xml report.
|
||||
|
||||
To check if the project currently passes static analysis:
|
||||
|
||||
```bash
|
||||
./mvnw compile spotbugs:check
|
||||
```
|
||||
|
@ -149,6 +152,7 @@ To check if the project currently passes static analysis:
|
|||
```bash
|
||||
./mvnw pmd:check
|
||||
```
|
||||
|
||||
SpotBugs also offers a local gui that displays information based on the output of spotbugs. Calling compile before spotless:gui, ensures
|
||||
we have all the latest changes reflected in the spotbugs report.
|
||||
|
||||
|
@ -172,7 +176,9 @@ PMD Docs Site: https://docs.pmd-code.org/latest/index.html
|
|||
PMD Java Rules: https://docs.pmd-code.org/latest/pmd_rules_java.html
|
||||
|
||||
PMD Maven Plugin Docs: https://maven.apache.org/plugins/maven-pmd-plugin/index.html
|
||||
|
||||
## Health Check
|
||||
|
||||
The Submit App uses [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/2.5.6/reference/html/actuator.html#actuator.endpoints.enabling) to expose a health check endpoint.
|
||||
When running the app locally, the health check endpoint should be available at:
|
||||
|
||||
|
@ -180,7 +186,6 @@ When running the app locally, the health check endpoint should be available at:
|
|||
http://localhost:{PORT}/actuator/health
|
||||
```
|
||||
|
||||
|
||||
## Tests
|
||||
|
||||
Run tests locally with `./mvnw test`
|
||||
|
|
|
@ -139,7 +139,7 @@ public class ErrorHandlingIntegrationTest {
|
|||
/**
|
||||
* This test walks through the workflow of handling a batch of submissions
|
||||
* that fails to submit to MeF. Note that submission failure is distinct from a submission being rejected.
|
||||
* A submission failure means some error occured that prevented us from submitting to MeF entirely.
|
||||
* A submission failure means some error occurred that prevented us from submitting to MeF entirely.
|
||||
*
|
||||
* Flow of this test:
|
||||
* 1. Create a Batch that contains 2 submissions
|
||||
|
|
|
@ -81,7 +81,7 @@ The first step is to identify whether the work is destined for the current `prod
|
|||
#### Releases and Deployment
|
||||
- Cut releases from `main`
|
||||
- Deploy from `main`
|
||||
- Reserve a lower environment for regular dpeloyments from `future` for testing future features
|
||||
- Reserve a lower environment for regular deployments from `future` for testing future features
|
||||
- Continue these two branches throughout the year
|
||||
- When it is time to cutover to TY25 (December 2025 or January 2026), fork a new `main` branch from `future` and continue the same processes for the next tax year
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ Written: 20Jun2023
|
|||
CSS has some major problems. Namely, it:
|
||||
1. lacks variable names and the ability to import variables from other libraries.
|
||||
1. lacks nesting and other hierarchical structures that are common in components.
|
||||
1. is global (cascading!), leading to naming conflicts, difficult dead code detection, and difficult maintainbility in large code bases.
|
||||
1. is global (cascading!), leading to naming conflicts, difficult dead code detection, and difficult maintainability in large code bases.
|
||||
1. is constantly updated, and can have different implementations and minor differences between different browsers
|
||||
|
||||
To avoid these issues, while adding additional features, most of the web development community uses one or more forms of CSS Preprocessor, preprocessing a superset of CSS into the CSS that will eventually reach the users' browsers.
|
||||
|
@ -28,9 +28,9 @@ To avoid these issues, while adding additional features, most of the web develop
|
|||
There are a few CSS popular preprocessors:
|
||||
1. [SASS](https://sass-lang.com/), per their own marketing speak, defines themselves as "the most mature, stable, and powerful professional grade CSS extension language in the world." Sass has ~10m weekly downloads on NPM and is increasing in number of downloads.
|
||||
1. [LESS](https://lesscss.org/) is the main competitor to SASS, and contains many of the same features. Less has ~4m weekly downloads on NPM and is flat in number of downloads.
|
||||
1. [PostCSS](https://postcss.org/) converts modern css into something most browsers can understand, placing polyfills in place. PostCSS is not a separate languagea -- it's a compile step like babel for greater compatibility. Stylelint and other tools are built on PostCSS
|
||||
1. [PostCSS](https://postcss.org/) converts modern css into something most browsers can understand, placing polyfills in place. PostCSS is not a separate language -- it's a compile step like babel for greater compatibility. Stylelint and other tools are built on PostCSS
|
||||
1. [CSS Modules](https://github.com/css-modules/css-modules) provide local scoping for CSS. Styles are defined in a normal css/less/sass file, then are imported into the React components that use those classes.
|
||||
1. [Tailwind](https://tailwindcss.com/) is noteable for being slightly different than other popular CSS frameworks, and is a css framework -- rather than a preprocessor -- that encourages stylistic, rather than semanetic, classnames directly in markup. It's gaining popularity rapidly (4.7m downloads/wk, up from 3m downloads/wk a year ago). However, it would be hard to integrate with USWDS.
|
||||
1. [Tailwind](https://tailwindcss.com/) is notable for being slightly different than other popular CSS frameworks, and is a css framework -- rather than a preprocessor -- that encourages stylistic, rather than semantic, classnames directly in markup. It's gaining popularity rapidly (4.7m downloads/wk, up from 3m downloads/wk a year ago). However, it would be hard to integrate with USWDS.
|
||||
1. [Stylelint](https://stylelint.io/) is a CSS linter used to prevent bugs and increase maintainability of CSS
|
||||
|
||||
|
||||
|
@ -38,7 +38,7 @@ There are a few CSS popular preprocessors:
|
|||
We should run the following CSS Preprocessors:
|
||||
1. Our CSS Language should be SASS, given its popularity and interoperability with USWDS. Most critically, we can import variable names from USWDS.
|
||||
1. We should additionally use SASS Modules to scope our CSS to their components, avoiding global cascades.
|
||||
1. We should use stylelint with its recommended config. We should also use the [a11y](https://www.npmjs.com/package/@ronilaukkarinen/stylelint-a11y) plugin experimentally to see if it helps us with accesibility (though noting that it seems not well supported and we should be willing to drop it).
|
||||
1. We should use stylelint with its recommended config. We should also use the [a11y](https://www.npmjs.com/package/@ronilaukkarinen/stylelint-a11y) plugin experimentally to see if it helps us with accessibility (though noting that it seems not well supported and we should be willing to drop it).
|
||||
1. Following our SASS compilation step, we should run postcss to get down to a supported list of browsers that we support via [browserlist](https://github.com/browserslist/browserslist#readme)
|
||||
|
||||
Unsurprisingly, when developing for these criteria (and with a sigh of relief that USWDS uses SASS), this is the same CSS stack used by [Create React App](https://create-react-app.dev/docs/adding-a-css-modules-stylesheet).
|
|
@ -7,7 +7,7 @@
|
|||
## Context and Problem Statement
|
||||
|
||||
[//]: # ([Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.])
|
||||
A decision was made in [adr-screener-config](./adr-screener-config.md) to use Astro SSG (static site generator) for the screener application. It was initially used for an MVP, and later replaced with React/Vite. This adr is to document that change and supercede the previous adr.
|
||||
A decision was made in [adr-screener-config](./adr-screener-config.md) to use Astro SSG (static site generator) for the screener application. It was initially used for an MVP, and later replaced with React/Vite. This ADR is to document that change and supersede the previous ADR.
|
||||
|
||||
## Decision Drivers
|
||||
|
||||
|
@ -26,7 +26,7 @@ Chosen option: "React/Vite", because it was consistent with the client app and t
|
|||
- More dynamic content is an option
|
||||
- We can easily utilize react-uswds library.
|
||||
- The i18n system is aligned in both the screener and the client app.
|
||||
- Engineers don't need to learn multiple systems and can seemlessly develop between the two apps. Onboarding for new engineers is simplified.
|
||||
- Engineers don't need to learn multiple systems and can seamlessly develop between the two apps. Onboarding for new engineers is simplified.
|
||||
|
||||
### Negative Consequences
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ informed: "All project technical staff"
|
|||
|
||||
In supporting a critical task for all U.S. residents, Direct File should be accessible. This decision record sets a clear target for this phase of development to meet those accessibility needs, and we expect to exceed that target when feasible.
|
||||
|
||||
The U.S. describes its accessiblity requirements in [Section 508](https://www.section508.gov/) of the Rehabilitation Act. [WCAG](https://www.w3.org/WAI/standards-guidelines/wcag/) provides the same for the internet at large with three levels of compliance (A, AA, AAA), and it has increased in minor versions (2.0, 2.1, 2.2) over the last 15 years. 508 and WCAG have a [very large overlap](https://www.access-board.gov/ict/#E207.2), and all non-overlapping features unique to 508 are either irrelevant to this project (e.g. manual operation and hardware) or out of this repo's scope (e.g. support channels). See the note in "More Information" below for further information.
|
||||
The U.S. describes its accessibility requirements in [Section 508](https://www.section508.gov/) of the Rehabilitation Act. [WCAG](https://www.w3.org/WAI/standards-guidelines/wcag/) provides the same for the internet at large with three levels of compliance (A, AA, AAA), and it has increased in minor versions (2.0, 2.1, 2.2) over the last 15 years. 508 and WCAG have a [very large overlap](https://www.access-board.gov/ict/#E207.2), and all non-overlapping features unique to 508 are either irrelevant to this project (e.g. manual operation and hardware) or out of this repo's scope (e.g. support channels). See the note in "More Information" below for further information.
|
||||
|
||||
Given these equivalencies, all considered options are oriented toward WCAG and achieve 508 compliance.
|
||||
|
||||
|
@ -38,7 +38,7 @@ Chosen option: "WCAG 2.2 AA (forward-thinking)", because it ensures we meet our
|
|||
* Good, because it challenges us to maximize WCAG at level AAA
|
||||
* Good, because it sets Direct File up with the latest a11y guidance for years to come
|
||||
* Good, because it doesn't require any more work than WCAG 2.1 AA with our current design
|
||||
* Neutral, because it may require slighly more work than the bare minimum in the future
|
||||
* Neutral, because it may require slightly more work than the bare minimum in the future
|
||||
* Neutral, some automated a11y tools don't yet support 2.2 (as of Oct 2023)
|
||||
|
||||
### Confirmation
|
||||
|
@ -65,7 +65,7 @@ This decision to exceed 508 requirements is confirmed by the IRS' 508 Program Of
|
|||
* Good, because it establishes and exceeds 508 compliance
|
||||
* Good, because it is easy to remember the level expected of all elements (i.e. "what needs level A vs AA?")
|
||||
* Good, because it challenges us to maximize WCAG at level AAA
|
||||
* Neutral, because it may require slighly more work than the bare minimum
|
||||
* Neutral, because it may require slightly more work than the bare minimum
|
||||
* Neutral, because it is an outdated version (see "more information" below)
|
||||
|
||||
### WCAG 2.2 AA (forward-thinking)
|
||||
|
@ -75,7 +75,7 @@ This decision to exceed 508 requirements is confirmed by the IRS' 508 Program Of
|
|||
* Good, because it challenges us to maximize WCAG at level AAA
|
||||
* Good, because it sets Direct File up with the latest a11y guidance for years to come
|
||||
* Good, because it doesn't require any more work than WCAG 2.1 AA with our current design
|
||||
* Neutral, because it may require slighly more work than the bare minimum in the future
|
||||
* Neutral, because it may require slightly more work than the bare minimum in the future
|
||||
* Neutral, some automated a11y tools don't yet support 2.2 (as of Oct 2023)
|
||||
|
||||
## More Information
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
## Overview
|
||||
|
||||
There are two main scenarios that, as it currently stands, the combination of our incident response (IR) practices and customer support (CS) motion do not support:
|
||||
|
||||
a) A taxpayer hits submit but an XML validation failure occurs and they cannot submit at all. CS is unavailable (for whatever reason) and they give up attempting to submit with Direct File.
|
||||
|
@ -10,7 +11,6 @@ In the above scenarios, neither taxpayer knows 1) if the issue they ran into has
|
|||
|
||||
While our scale to-date (as of 2/26) has shielded us from the pain of these scenarios, or alleviated them altogether, we don't have a clear way to address them at this moment. Further, there is a very high likelihood that these scenarios will occur over the coming weeks and will become especially painful if/when the submission volume scales dramatically faster than our CS capabilities.
|
||||
|
||||
|
||||
## Proposals
|
||||
|
||||
While we cannot change our CS motion to support this, we can enable a better product experience through how we notify taxpayers via email when an error occurs in our system.
|
||||
|
@ -21,12 +21,11 @@ We should notify taxpayers via email that there was an error submitting their re
|
|||
### Notify taxpayers when Direct File has deployed a fix that should allow them to resubmit their return
|
||||
We should also notify taxpayers via email that they are able to submit their return when we have deployed a fix into production that addresses the error that blocked them from submitting in the first place.
|
||||
|
||||
|
||||
### Proposed Technical Changes (Rough)
|
||||
|
||||
1. Add two new HTML Templates to capture the two notification scenarios above, e.g. SubmissionErrorTemplate and ErrorResolvedTemplate. The templates should be added to the backend app, such that the ConfirmationService can process it, as well as the actual HTML template in the email app that is sent via email and rendered to the taxpayer
|
||||
2. When an XML validation failure occurs during submission create a `SubmissionEvent` with an `eventType` of `error_xml` and enqueue a message from the backend to the email app to notify the user (naming of the eventType is TBD, might make sense to add a new `message` column and keep the eventType as `error`)
|
||||
3. Update the SQS message sent from submit -> backend (on the submission confirmation queue) to allow for an `error` status. If the ConfirmationService and SendService are properly configured as per #1 above, everything should flow seemlessly. Similar to #2, create a `SubmissionEvent` with an `eventType` of `error_mef` for each submission that failed to submit to MeF (naming of the eventType is TBD, might make sense to add a new `message` column and keep the eventType as `error`)
|
||||
3. Update the SQS message sent from submit -> backend (on the submission confirmation queue) to allow for an `error` status. If the ConfirmationService and SendService are properly configured as per #1 above, everything should flow seamlessly. Similar to #2, create a `SubmissionEvent` with an `eventType` of `error_mef` for each submission that failed to submit to MeF (naming of the eventType is TBD, might make sense to add a new `message` column and keep the eventType as `error`)
|
||||
4. Add a function to the backend that, when called, ingests a CSV of `taxReturnIds`, transforms the list into a SubmissionStatusMessage and calls ConfirmationService.handleStatusChangeEvent
|
||||
5. Once a deploy goes out that fixes the underlying issue, create a CSV with `taxReturnIds` of the affected taxpayers (both those who reached out to CS and those who did not) using Splunk queries
|
||||
6. Send this CSV to IEP and ask 1) their System Admin to run the command specified in #4; or 2) have them upload it to S3 and do something similar to the email allow list such that the function specified in #4 polls S3 and sends emails based off this polling. This second approach would require more state management but would possibly cut out the need for IEP to run commands and maybe obviate the need for a p2 to make this happen.
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
# Data Import Redux
|
||||
|
||||
Date: 5 Nov 2024
|
||||
|
||||
## Context
|
||||
|
||||
To facilitate the correctness of the frontend data import system, we will use redux to coalesce data from APIs.
|
||||
|
||||
Data Import has many lifetimes that are independent of traditional timeframes from the app. Data fetches get kicked off, data returns later, data gets validated as ready for import all independently of user action.
|
||||
|
@ -14,21 +16,21 @@ Data Import will use the Redux Library to make it easier for us to manage changi
|
|||
|
||||
We will write logic in redux to transform data as it comes in so that the frontend knows when to use it.
|
||||
|
||||
|
||||
# Alternatives Considered
|
||||
|
||||
## React Context and Fact Graph
|
||||
|
||||
- Lack of chained actions - because we expect data from different sections (about you, IP PIN, w2) to come in at different times, we need to be able to chain /retry fetches and coalsce them into one structure. Redux makes this much easier than alternatives considered.
|
||||
- Lack of chained actions - because we expect data from different sections (about you, IP PIN, w2) to come in at different times, we need to be able to chain /retry fetches and coalesce them into one structure. Redux makes this much easier than alternatives considered.
|
||||
- Limits blast radius - with data coming in and out while people are on other screens, redux provides much better APIs to avoid rerenders on API calls that are not relevant to the current screen.
|
||||
|
||||
# Other Libraries
|
||||
|
||||
I looked briefly at Recoil, MobX, Zustand and Jotai but they all seemed geared at simpler apps. Some of Data Import's initial features (e.g. knowing if more than a second has elapsed during a request) are much easier to impliment in redux based on my prototyping. Secondly, Redux is so well used that nobody ever got fired for using redux :P
|
||||
I looked briefly at Recoil, MobX, Zustand and Jotai but they all seemed geared at simpler apps. Some of Data Import's initial features (e.g. knowing if more than a second has elapsed during a request) are much easier to implement in redux based on my prototyping. Secondly, Redux is so well used that nobody ever got fired for using redux :P
|
||||
|
||||
# Future Uses
|
||||
|
||||
Redux has a few key advantages over things we have in our codebase right now:
|
||||
|
||||
- Automatically manages the render cycle more efficiently (important as our react tree grows ever larger)
|
||||
- Proven at scale with complex application state
|
||||
- Well known in the industry with a good tooling eco system
|
||||
|
|
|
@ -466,7 +466,6 @@ Examples of the "start," "continue," and right-caret cues that lead taxpayers th
|
|||
|
||||
<img width="820" alt="Checklist_start_continue" src="https://github.com/user-attachments/assets/f1c639f5-91b2-41e0-af01-36812fa66fa3" />
|
||||
|
||||
|
||||
### **Navigation**
|
||||
|
||||
* Taxpayers will be prompted by a Start button to begin a section or a Continue button if they have saved at least one input in a subsection, left off in their task and then came back to resume work. Those prompts change as the user progresses. They go away (for section titles) or are replaced with a right-caret (for subsection titles) when DF considers the segment has been completed.
|
||||
|
@ -665,7 +664,6 @@ Other subsections start with a Collection hub, which allows a taxpayer to report
|
|||
|
||||

|
||||
|
||||
|
||||
These are examples of collection hubs:
|
||||
|
||||
* Family and household
|
||||
|
@ -764,7 +762,6 @@ The amount of content on these screens varies. We haven't been strict about the
|
|||
|
||||

|
||||
|
||||
|
||||
If there's a lot of information, the intro info could be broken into 2 screens, like for Family and household:
|
||||
|
||||

|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Design and a11y review process
|
||||
|
||||
The below steps are specifically the design and accessibility testing part of the overall acceptance flow process. The code submitter should have done their due dilligence for any user interface changes so that a design/a11y reviewer can focus on finer details. Although there are several steps, if we do this regularly it will be a light lift and will avoid any design/a11y debt.
|
||||
The below steps are specifically the design and accessibility testing part of the overall acceptance flow process. The code submitter should have done their due diligence for any user interface changes so that a design/a11y reviewer can focus on finer details. Although there are several steps, if we do this regularly it will be a light lift and will avoid any design/a11y debt.
|
||||
|
||||
## Verify that any changes match the design intention
|
||||
|
||||
|
|
|
@ -3,27 +3,28 @@ Updated At: May 14, 2024
|
|||
|
||||
RFC: Evolving How We Align on Technical Decisions
|
||||
|
||||
|
||||
# Problem Statement
|
||||
|
||||
Direct File has matured as an application over the past year (read: we went to production) and grew its headcount to a 70+ member product team. However, we have not invested a lot of resources into adapting our processes around technical decision making to our burgeoning scale. This has manifested itself in last-mile delivery delays for various initiatives, primarily due to the fact that the right stakeholders were not in the room at the appropriate moments or they did not know that a decision was being made until after the fact.
|
||||
|
||||
Similarly, as Direct File grows in product scope, our relationship with the larger IRS IT organization will both change and become increasingly important. Integrating our ways of working during the pilot to the processes of the IRS enterprise more broadly will require a different approach than what served us during the pilot.
|
||||
|
||||
# BLUF: Proposed Process Changes
|
||||
|
||||
1. Distinguish RFCs from ADRs and clarify when to leverage each one
|
||||
2. Establish a dedicated meeting time, with defined decision-makers, for reviewing RFCs that did not achieve alignment during async review
|
||||
3. Include IRS IT SMEs within the RFC process and identify the criteria under which they should be engaged for certain changes
|
||||
|
||||
|
||||
# Definitions
|
||||
|
||||
Request For Comments (RFC): In the engineering context, a formal document that recommends a technical specification for a set of product requirements. This can include anything from a change solely at the application level to a system(s)-wide architecture that engages with multiple external services. Typically contains a set of working definitions, articulation of key product requirements, proposed implementation and analysis of alternatives. If the RFC is approved, it becomes the touchpoint for technical implementation and should be revised if new requirements appear. Some RFCs are designated indefinitely with Experimental or Draft status.
|
||||
|
||||
Architecture Decision Record (ADR): A “lightweight” document that captures the rationale of an Architectural Decision (AD), i.e. a justified design choice that addresses a functional or non-functional requirement that is architecturally significant. An ADR captures a single AD and its rationale; the collection of ADRs created and maintained in a project constitute its decision log. An ADR typically contains a title, status, context, decision, and consequences.
|
||||
|
||||
|
||||
# Proposal
|
||||
|
||||
## Goal
|
||||
|
||||
The goal of this proposal is to find the right balance between:
|
||||
|
||||
1) Functioning in a product-centric, agile manner;
|
||||
|
@ -39,7 +40,9 @@ We should engage with, at a minimum, our embedded Cyber SME and Technical Adviso
|
|||
IRS IT SMEs should be involved when a system-wide change is being proposed, in particular one that might involve an update to any part of our ATO and especially control implementation statements in our System Security Plan (SSP). These changes typically require updating the `application.boundary` or `application.context` compliance documentation at some stages.
|
||||
|
||||
### When should I loop in IRS IT SMEs?
|
||||
|
||||
Examples of "system-wide changes" include, but are not limited to:
|
||||
|
||||
- Provisioning new cloud infrastructure, both compute and storage
|
||||
- Requesting changes to how our network boundaries are configured
|
||||
- Adding new API endpoints or modifying where a URI can be located (i.e. changing the structure of the endpoint)
|
||||
|
@ -54,17 +57,18 @@ Examples of "system-wide changes" includes, but are not limited to:
|
|||
- Major version upgrades of software framework components
|
||||
- Standing up new deployed services
|
||||
|
||||
|
||||
## Leverage RFCs as the primary mechanism to propose technical specifications
|
||||
|
||||
We currently conflate the concept of an ADR with an RFC. ADRs are static artifacts that are post-decisional; an engineer should be able to read the entire decision log of ADRs and roughly understand why the system is configured as it is. RFCs, on the other hand, are live documents meant for discussion and iteration. They are better suited for soliciting input and collecting feedback on a proposed approach, which is the right first step for proposing a technical specification. The outcome of an RFC might be an ADR, if the scale of proposed change merits it.
|
||||
|
||||
In practice, this means that once engineers are given a sufficiently large or complex set of requirements, they articulate their proposed approach in an RFC, rather than a combination of Gitlab tickets, Slack threads, and markdown files committed to the codebase with the word "ADR" in the title. This forces the engineer to spend more time substantiating their reasoning for a certain implementation and weighing various alternatives, as well as investigating the various upstream and downstream dependencies of the proposal. It also requires the engineer to consolidate their thoughts into a single SOT document, reducing the cognitive overhead of all participants of tracking the outcome across multiple surfaces (Github, Gitlab, Slack, Teams, etc.).
|
||||
|
||||
Writing an RFC **does not** negate the need to prototype various solutions; rather, prototyping should be considered part of the RFC process as a way to demonstrate the feasability of a given approach and that alternatives were considered.
|
||||
Writing an RFC **does not** negate the need to prototype various solutions; rather, prototyping should be considered part of the RFC process as a way to demonstrate the feasibility of a given approach and that alternatives were considered.
|
||||
|
||||
Importantly, designing larger features and broader architecture design has historically been limited to a small number of developers relative to the size of the engineering organization, limiting the ability for other engineers to contribute and grow as system designers. This is mostly due to reasons of velocity and speed necessary to get the pilot out the door. RFCs provide a mechanism through which to enable other engineers to own their features end-to-end, instead of relying on another engineer to propose the implementation which they then action.
|
||||
|
||||
## Add a synchronous, cross-org forum for discussing RFCs that are not resolved asynchronously
|
||||
|
||||
In addition to moving to a world where RFCs are the formal documents that facilitate discussion, we should also move away from a model where major engineering decisions are both reviewed and approved by a single functional team in a predominately asynchronous manner, i.e. in Github PRs. Instead, we should move towards a model where a broader audience can weigh in on proposed changes and discuss outstanding questions in a synchronous manner.
|
||||
|
||||
Making RFC review into a blocking mechanism is not the goal. Synchronous time should be leveraged **only** when there are outstanding questions on a proposal that require live, cross-team discussion. While async review and approval is always the first and best option, practically speaking at a certain level of system change, live discussion is inevitable if not necessary. We should embrace that reality, not fight it and rely on back-channels and 100-comment Slack thread to facilitate alignment on major changes.
|
||||
|
@ -73,30 +77,31 @@ Tactically, this would involve adding a standing RFC-review meeting that is 1) t
|
|||
|
||||
One key benefit here is that a cross-organization, discussion-based approach to RFCs reduces knowledge silos across the product organization and allows engineers to better 1) understand what is happening across different teams; and thus 2) flag cross-cutting concerns that might not have been addressed during the primary review phase (e.g. changes to authn/authz affects many different teams, but not every team might be involved as the primary reviewers).
|
||||
|
||||
|
||||
### Why a standing meeting instead of as needed/ad-hoc?
|
||||
|
||||
While the flexibility of ad-hoc better mirrors our historical and current practices around engineering meetings, there are a few reasons why a standing meeting with the sole purpose of reviewing RFCs is beneficial, at least in the first instance:
|
||||
|
||||
1. The right people are always in the room: the blended team model create a world where no single individual has access to everyone's calendar. By maintaining a standing meeting, everyone must put re-occuring blocks on their respective calendars, greatly increasing the chance that if they are a stakeholder, they will be able to attend.
|
||||
1. The right people are always in the room: the blended team model creates a world where no single individual has access to everyone's calendar. By maintaining a standing meeting, everyone must put re-occurring blocks on their respective calendars, greatly increasing the chance that if they are a stakeholder, they will be able to attend.
|
||||
1. In this vein, we want to ensure that our key IRS IT counterparts - those with a known stake in facilitating the delivery of the technical output - have their concerns addressed before proceeding to implementation. This reduces our overall delivery lead time by removing "unknown unknowns" and proactively identifying (and accounting for) process-based roadblocks much earlier in the delivery process.
|
||||
2. Resolving opposing views: major engineering changes often have several viable paths, and it is rare to have all outstanding questions answered asynchronously. A standing meeting releases both the author and reviewer from "finding a time to hash it out live" in favor of using a dedicated mechanism like RFC review (with an agenda and time limit on topics) to facilitate the discussion. This reduces unnecessary friction within and across teams, and enables other members of the organization to manage the discussion.
|
||||
3. Context sharing and maintaining visibility for other teams and leadership: As Direct File grows, it is unrealistic that the people who might have reviewed PRs during the pilot will have the time to do so in Year 2, 3, etc. This doesn't mean, however, that they want to be divorced from the technical discussions that are happening. A standing meeting provides a dedicated space for those members/leadership to keep a finger on the pulse of what is happening without reviewing a dozen RFCs a week.
|
||||
4. It is easier to start with a standing meeting and move to ad-hoc later than vice versa. Especially as we build the organizational muscles around a process like RFC review, it is helpful to have the meeting in place instead of requiring individuals to advocate for ad-hoc meetings out of the gate. During filing season, for instance, I expect us to leverage ad-hoc meetings significantly more. Conversely, during May-September when a lot of planning and technical designs are choosen, we would benefit from a standing meeting to make sure we aren't crossing-wires and are moving in lockstep.
|
||||
4. It is easier to start with a standing meeting and move to ad-hoc later than vice versa. Especially as we build the organizational muscles around a process like RFC review, it is helpful to have the meeting in place instead of requiring individuals to advocate for ad-hoc meetings out of the gate. During filing season, for instance, I expect us to leverage ad-hoc meetings significantly more. Conversely, during May-September when a lot of planning and technical designs are chosen, we would benefit from a standing meeting to make sure we aren't crossing-wires and are moving in lockstep.
|
||||
5.
|
||||
|
||||
# Appendix I: Step-by-Step examples of how this all works in practice
|
||||
|
||||
If implemented, the expected development lifecycle would look roughly as follows:
|
||||
|
||||
**note: Each team/group/pod maintains autonomy in terms of how they want to define and implement the various steps, as long as 1) async and sync RFC review is incorporated into their development; and 2) IRS IT SMEs are engaged at the appropriate moments. The below will not map perfectly onto any given team's cadence, and instead aims to approximate the most process-heavy approach from which teams can choose what they would like to incorporate.**
|
||||
|
||||
1. Product requirements for a feature set are specified in a ticket (by someone)
|
||||
2. The Directly Responsible Engineer (DRE) provides an intial, rough estimate of the scope and sizing of the work, as well as the documentation required to drive alignment on an orginizationally acceptable approach:
|
||||
2. The Directly Responsible Engineer (DRE) provides an initial, rough estimate of the scope and sizing of the work, as well as the documentation required to drive alignment on an organizationally acceptable approach:
|
||||
1. If a system-wide change (see below for criteria) is involved, an RFC and ADR will be required before moving to any implementation. **IRS IT SMEs should be looped in early as key stakeholders and reviewers.**
|
||||
2. If the feature set is not a system-wide change, the DRE has discretion about if an RFC would be a helpful tool to facilitate design and/or gain consensus within a team or across teams. Some feature sets are complex enough to benefit from an RFC; others are not. Once the RFC is drafted, reviewed and approved, the DRE can begin implementation.
|
||||
3. If an RFC is not needed, the DRE can immediately begin implementation and put up a PR with a description of the work and link back to the ticket.
|
||||
3. If an RFC is needed, the DRE drafts a written proposal as a means to solicit feedback on the proposed technical approach. The document should live in the `docs/rfc` directory and be committed to the codebase in a PR in a text format like Markdown with associated artifacts (diagrams, etc.) included as needed.
|
||||
1. All initial discussion can happen asynchronously and ad-hoc.
|
||||
2. If a system-wide change is being proposed, DevOps and our IRS IT colleagues (in partiular Cyber SME and Technical Advisor) should be looped in at this stage as reviewers.
|
||||
2. If a system-wide change is being proposed, DevOps and our IRS IT colleagues (in particular Cyber SME and Technical Advisor) should be looped in at this stage as reviewers.
|
||||
3. If a system-wide change is not being proposed, the DRE and reviewers should use their discretion as to if IRS IT should be engaged or not during the RFC stage. **If they are not engaged, the assumption is that they will not need to be engaged during or after implementation.**
|
||||
4. If all questions (including those from IRS IT colleagues) are sufficiently addressed in the written RFC, the RFC can be approved and the DRE can move to implementation.
|
||||
5. If there are outstanding questions in the RFC that cannot be resolved asynchronously, the RFC is slotted for discussion during the standing "RFC Review" meeting and circulated for discussion to all RFC Review participants.
|
||||
|
@ -107,8 +112,8 @@ If implemented, the expected development lifecycle would look roughly as follows
|
|||
1. No alignment is needed on the ADR as it simply codifies the outcome of the RFC and RFC review.
|
||||
7. Once the RFC and/or ADR stages are complete, the DRE can begin implementation. At the same time, they also coordinate with IRS IT and DevOps to understand if any additional documentation aside from the RFC and ADR is necessary to initiate or facilitate IRS IT or IEP processes.
|
||||
|
||||
|
||||
# Appendix II: Deciding between RFC, ADR and normal PRs
|
||||
|
||||
This section provides a basic decision tree for deciding between the following processes (in order of number of parties that need to coordinate to make a change, from least to most):
|
||||
|
||||
- Ticket with a PR
|
||||
|
@ -140,6 +145,3 @@ In general, default to the process requiring the least coordination available if
|
|||
4. Spring-ifying the backend services
|
||||
5. Updating dependencies
|
||||
6. Remediating security findings
|
||||
|
||||
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue