Don't filter constraints from the event broadcast - this was not needed after all
James Agnew 2019-07-02 09:21:15 -04:00
parent 7b7136e3ab
commit 1faf7785c4
2 changed files with 6 additions and 10 deletions


@@ -658,7 +658,10 @@ public enum Pointcut {
 	/**
-	 * Invoked when one or more resources may are about to be cascading a delete.
+	 * Invoked when a resource is being deleted in a cascaded delete. This means that
+	 * some other resource is being deleted, but per user request or other
+	 * policy, the given resource (the one supplied as a parameter to this hook)
+	 * is also being deleted.
 	 * <p>
 	 * Hooks may accept the following parameters:
 	 * </p>
@@ -687,7 +690,8 @@ public enum Pointcut {
 	 * </li>
 	 * </ul>
 	 * <p>
-	 * Hooks should return <code>void</code>.
+	 * Hooks should return <code>void</code>. They may choose to throw an exception however, in
+	 * which case the delete should be rolled back.
 	 * </p>
 	 */
 	STORAGE_CASCADE_DELETE(
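
The updated Javadoc spells out that a hook may veto the operation by throwing an exception, which rolls the cascaded delete back. Below is a minimal sketch of such an interceptor; the exact hook parameters are not shown in this hunk, so the RequestDetails/IBaseResource signature and the "block Patient deletes" policy are illustrative assumptions only.

import ca.uhn.fhir.interceptor.api.Hook;
import ca.uhn.fhir.interceptor.api.Interceptor;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.server.exceptions.PreconditionFailedException;
import org.hl7.fhir.instance.model.api.IBaseResource;

@Interceptor
public class CascadeDeleteGuard {

	/**
	 * Invoked once per resource that is about to be removed as part of a cascaded
	 * delete. Throwing here is expected to roll the whole delete back.
	 */
	@Hook(Pointcut.STORAGE_CASCADE_DELETE)
	public void blockCascadedPatientDeletes(RequestDetails theRequestDetails, IBaseResource theResource) {
		// Hypothetical policy: never allow a Patient to disappear as a side effect
		// of deleting some other resource.
		if ("Patient".equals(theResource.fhirType())) {
			throw new PreconditionFailedException(
				"Refusing cascaded delete of " + theResource.getIdElement().getValue());
		}
	}
}

Registration would use the usual interceptor registry call, for example registerInterceptor(new CascadeDeleteGuard()).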


@@ -92,14 +92,6 @@ public class DeleteConflictService {
 			return null;
 		}
-		// Don't send two conflict events for the same source resource
-		Set<Long> sourceIds = new HashSet<>();
-		resultList = resultList
-			.stream()
-			.filter(t -> sourceIds.add(t.getSourceResourcePid()))
-			.collect(Collectors.toList());
 		return handleConflicts(theRequest, theDeleteConflicts, theEntity, theForValidate, resultList);
 	}
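
The removed block relied on a common stream idiom: Set.add(...) returns false for a value already seen, so filter(t -> set.add(key(t))) keeps only the first conflict per source resource. With this commit the broadcast no longer de-duplicates, so a hook that still wants at most one notification per source resource can apply the same trick on its own side. The sketch below is illustrative only and uses a hypothetical Conflict stand-in rather than the real JPA conflict entity.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class ConflictDeduper {

	/** Hypothetical stand-in for the real conflict entity; only the source PID matters here. */
	public static class Conflict {
		private final Long mySourceResourcePid;

		public Conflict(Long theSourceResourcePid) {
			mySourceResourcePid = theSourceResourcePid;
		}

		public Long getSourceResourcePid() {
			return mySourceResourcePid;
		}
	}

	/** Keeps only the first conflict seen for each source resource PID. */
	public static List<Conflict> dedupeBySource(List<Conflict> theConflicts) {
		Set<Long> seenSourcePids = new HashSet<>();
		return theConflicts.stream()
			// Set.add returns false for duplicates, so repeats are filtered out
			.filter(t -> seenSourcePids.add(t.getSourceResourcePid()))
			.collect(Collectors.toList());
	}
}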